From f5e71819494bca796057408ae1d3f45a929c3a2d Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Thu, 3 Apr 2025 23:06:54 +0800 Subject: [PATCH 01/51] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E7=BC=93=E5=AD=98?= =?UTF-8?q?=E5=99=A8=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 89 ++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 src/plugins/chat/message_buffer.py diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py new file mode 100644 index 000000000..93a7d6487 --- /dev/null +++ b/src/plugins/chat/message_buffer.py @@ -0,0 +1,89 @@ +from ..person_info import person_info +from src.common.logger import get_module_logger +import asyncio +from dataclasses import dataclass +from .message import MessageRecv +import hashlib +from typing import List, Dict +from dataclasses import dataclass, field + +logger = get_module_logger("message_buffer") + +@dataclass +class CacheMessages: + message: MessageRecv + cache_determination: asyncio.Event = field(default_factory=asyncio.Event) # 判断缓冲是否产生结果 + result: str = "U" + + +class MassageBuffer: + def __init__(self): + self.buffer_pool: Dict[str, List[CacheMessages]] = {} + self.lock = asyncio.Lock() + + def get_person_id_(self, platform:str, user_id:str, group_id:str): + """获取唯一id""" + group_id = group_id or "私聊" + key = f"{platform}_{user_id}_{group_id}" + return hashlib.md5(key.encode()).hexdigest() + + async def start_caching_messages(self, message:MessageRecv): + """添加消息并重置缓冲计时器""" + person_id_ = self.get_person_id_(message.chat_info.platform, + message.chat_info.user_info.user_id, + message.chat_info.group_info.group_id) + async with self.lock: + # 清空该用户之前的未处理消息 + if person_id_ in self.buffer_pool: + for old_msg in self.buffer_pool[person_id_]: + if old_msg.result == "U": + old_msg.cache_determination.set() + old_msg.result = "F" # 标记旧消息为失败 + logger.debug(f"被新消息覆盖信息id: {message.message_id}") + + # 添加新消息 + cache_msg = CacheMessages(message=message, result="U") + self.buffer_pool[person_id_] = [cache_msg] # 只保留最新消息 + + # 启动3秒缓冲计时器 + asyncio.create_task(self._debounce_processor(person_id_, cache_msg)) + + async def _debounce_processor(self, person_id_:str, cache_msg:CacheMessages): + """等待3秒无新消息""" + await asyncio.sleep(3) + + async with self.lock: + # 检查消息是否仍未被覆盖 + if (person_id_ in self.buffer_pool and + cache_msg in self.buffer_pool[person_id_] and + cache_msg.result == "U"): + + cache_msg.result = "T" # 标记为成功处理 + cache_msg.cache_determination.set() + + + async def query_buffer_result(self, message:MessageRecv) -> bool: + """查询缓冲结果""" + person_id_ = self.get_person_id_(message.chat_info.platform, + message.chat_info.user_info.user_id, + message.chat_info.group_info.group_id) + + async with self.lock: + if person_id_ not in self.buffer_pool or not self.buffer_pool[person_id_]: + return False + + cache_msg = self.buffer_pool[person_id_][-1] # 获取最新消息 + if cache_msg.message.message_id != message.message_id: + return False + + try: + await asyncio.wait_for(cache_msg.cache_determination.wait(), timeout=10) + return cache_msg.result == "T" + except asyncio.TimeoutError: + logger.debug(f"查询超时消息id: {message.message_id}") + return False + + + + +message_buffer = MassageBuffer() \ No newline at end of file From 78b5ee81d664f43d38cda0fafc280f633976b74e Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 09:48:19 +0800 Subject: [PATCH 02/51] 
=?UTF-8?q?=E5=BF=83=E6=B5=81=E6=A8=A1=E5=BC=8F?= =?UTF-8?q?=E5=AE=8C=E6=88=90=E7=BC=93=E5=86=B2=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 141 +++++++++++++----- .../think_flow_chat/think_flow_chat.py | 10 ++ src/plugins/person_info/person_info.py | 1 + 3 files changed, 112 insertions(+), 40 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index 93a7d6487..4e9aa5582 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -1,11 +1,13 @@ -from ..person_info import person_info +from ..person_info.person_info import person_info_manager from src.common.logger import get_module_logger import asyncio from dataclasses import dataclass from .message import MessageRecv import hashlib -from typing import List, Dict +from typing import Dict from dataclasses import dataclass, field +from collections import OrderedDict +import random logger = get_module_logger("message_buffer") @@ -18,7 +20,7 @@ class CacheMessages: class MassageBuffer: def __init__(self): - self.buffer_pool: Dict[str, List[CacheMessages]] = {} + self.buffer_pool: Dict[str, OrderedDict[str, CacheMessages]] = {} self.lock = asyncio.Lock() def get_person_id_(self, platform:str, user_id:str, group_id:str): @@ -28,62 +30,121 @@ class MassageBuffer: return hashlib.md5(key.encode()).hexdigest() async def start_caching_messages(self, message:MessageRecv): - """添加消息并重置缓冲计时器""" - person_id_ = self.get_person_id_(message.chat_info.platform, - message.chat_info.user_info.user_id, - message.chat_info.group_info.group_id) + """添加消息,启动缓冲""" + person_id_ = self.get_person_id_(message.message_info.platform, + message.message_info.user_info.user_id, + message.message_info.group_info.group_id) + async with self.lock: - # 清空该用户之前的未处理消息 - if person_id_ in self.buffer_pool: - for old_msg in self.buffer_pool[person_id_]: - if old_msg.result == "U": - old_msg.cache_determination.set() - old_msg.result = "F" # 标记旧消息为失败 - logger.debug(f"被新消息覆盖信息id: {message.message_id}") + if person_id_ not in self.buffer_pool: + self.buffer_pool[person_id_] = OrderedDict() + + # 查找最近的处理成功消息(T) + last_T_msg = None + recent_F_count = 0 + for msg_id in reversed(self.buffer_pool[person_id_]): + msg = self.buffer_pool[person_id_][msg_id] + if msg.result == "T": + last_T_msg = msg + break + elif msg.result == "F": + recent_F_count += 1 + + # 判断条件:最近T之后有超过3条F + if (recent_F_count >= random.randint(3, 5)): + new_msg = CacheMessages(message=message, result="T") + new_msg.cache_determination.set() + self.buffer_pool[person_id_][message.message_info.message_id] = new_msg + logger.debug(f"快速处理消息(已堆积{recent_F_count}条F): {message.message_info.message_id}") + return + + # 标记该用户之前的未处理消息 + for msg_id, cache_msg in self.buffer_pool[person_id_].items(): + if cache_msg.result == "U": + cache_msg.result = "F" + cache_msg.cache_determination.set() + logger.debug(f"被新消息覆盖信息id: {message.message_info.message_id}") # 添加新消息 - cache_msg = CacheMessages(message=message, result="U") - self.buffer_pool[person_id_] = [cache_msg] # 只保留最新消息 + self.buffer_pool[person_id_][message.message_info.message_id] = CacheMessages(message=message) # 启动3秒缓冲计时器 - asyncio.create_task(self._debounce_processor(person_id_, cache_msg)) + person_id = person_info_manager.get_person_id(message.message_info.user_info.platform, + message.message_info.user_info.user_id) + asyncio.create_task(self._debounce_processor(person_id_, + 
message.message_info.message_id, + person_id)) - async def _debounce_processor(self, person_id_:str, cache_msg:CacheMessages): + async def _debounce_processor(self, person_id_: str, message_id: str, person_id: str): """等待3秒无新消息""" - await asyncio.sleep(3) + interval_time = await person_info_manager.get_value(person_id, "msg_interval") + if not isinstance(interval_time, (int, str)) or not str(interval_time).isdigit(): + logger.debug("debounce_processor无效的时间") + return + interval_time = max(0.5, int(interval_time) / 1000) + await asyncio.sleep(interval_time) async with self.lock: - # 检查消息是否仍未被覆盖 - if (person_id_ in self.buffer_pool and - cache_msg in self.buffer_pool[person_id_] and - cache_msg.result == "U"): - - cache_msg.result = "T" # 标记为成功处理 + if (person_id_ not in self.buffer_pool or + message_id not in self.buffer_pool[person_id_]): + logger.debug(f"消息异常被清理,msgid: {message_id}") + return + + cache_msg = self.buffer_pool[person_id_][message_id] + if cache_msg.result == "U": + cache_msg.result = "T" cache_msg.cache_determination.set() async def query_buffer_result(self, message:MessageRecv) -> bool: - """查询缓冲结果""" - person_id_ = self.get_person_id_(message.chat_info.platform, - message.chat_info.user_info.user_id, - message.chat_info.group_info.group_id) + """查询缓冲结果,并清理""" + person_id_ = self.get_person_id_(message.message_info.platform, + message.message_info.user_info.user_id, + message.message_info.group_info.group_id) + async with self.lock: - if person_id_ not in self.buffer_pool or not self.buffer_pool[person_id_]: - return False - - cache_msg = self.buffer_pool[person_id_][-1] # 获取最新消息 - if cache_msg.message.message_id != message.message_id: - return False - + user_msgs = self.buffer_pool.get(person_id_, {}) + cache_msg = user_msgs.get(message.message_info.message_id) + + if not cache_msg: + logger.debug(f"查询异常,消息不存在,msgid: {message.message_info.message_id}") + return False # 消息不存在或已清理 + try: await asyncio.wait_for(cache_msg.cache_determination.wait(), timeout=10) - return cache_msg.result == "T" + result = cache_msg.result == "T" + + if result: + async with self.lock: # 再次加锁 + # 清理所有早于当前消息的已处理消息, 收集所有早于当前消息的F消息的processed_plain_text + keep_msgs = OrderedDict() + combined_text = [] + found = False + for msg_id, msg in self.buffer_pool[person_id_].items(): + if msg_id == message.message_info.message_id: + found = True + combined_text.append(msg.message.processed_plain_text) + continue + if found: + keep_msgs[msg_id] = msg + elif msg.result == "F": + # 收集F消息的文本内容 + if hasattr(msg.message, 'processed_plain_text') and msg.message.processed_plain_text: + combined_text.append(msg.message.processed_plain_text) + elif msg.result == "U": + logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}") + + # 更新当前消息的processed_plain_text + if combined_text and combined_text[0] != message.processed_plain_text: + message.processed_plain_text = "".join(combined_text) + logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") + + self.buffer_pool[person_id_] = keep_msgs + return result except asyncio.TimeoutError: - logger.debug(f"查询超时消息id: {message.message_id}") + logger.debug(f"查询超时消息id: {message.message_info.message_id}") return False - - message_buffer = MassageBuffer() \ No newline at end of file diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index 87ea1575a..de034e25b 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ 
-18,6 +18,7 @@ from src.heart_flow.heartflow import heartflow from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from ...chat.chat_stream import chat_manager from ...person_info.relationship_manager import relationship_manager +from ...chat.message_buffer import message_buffer # 定义日志配置 chat_config = LogConfig( @@ -161,6 +162,8 @@ class ThinkFlowChat: userinfo = message.message_info.user_info messageinfo = message.message_info + # 消息加入缓冲池 + await message_buffer.start_caching_messages(message) # 创建聊天流 chat = await chat_manager.get_or_create_stream( @@ -192,8 +195,15 @@ class ThinkFlowChat: timing_results["记忆激活"] = timer2 - timer1 logger.debug(f"记忆激活: {interested_rate}") + # 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text + buffer_result = await message_buffer.query_buffer_result(message) + if not buffer_result: + logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") + return + is_mentioned = is_mentioned_bot_in_message(message) + # 计算回复意愿 current_willing_old = willing_manager.get_willing(chat_stream=chat) # current_willing_new = (heartflow.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4 diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index f940c0fca..3373366a0 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -30,6 +30,7 @@ person_info_default = { # "impression" : None, # "gender" : Unkown, "konw_time" : 0, + "msg_interval": 3000 } # 个人信息的各项与默认值在此定义,以下处理会自动创建/补全每一项 class PersonInfoManager: From 255d4ea575ef62a59b482a4ffc1b22aeccf72c08 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 13:26:46 +0800 Subject: [PATCH 03/51] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BA=86=E4=B8=80?= =?UTF-8?q?=E4=BA=9Bbug=EF=BC=8C=E5=8C=85=E6=8B=AC=E7=BC=93=E5=86=B2?= =?UTF-8?q?=E5=99=A8=E5=AF=B9=E4=BA=8E=E6=B6=88=E6=81=AF=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E5=A4=84=E7=90=86=EF=BC=8Cpersoninfo=E7=9A=84?= =?UTF-8?q?=E6=8B=B7=E8=B4=9D=E9=97=AE=E9=A2=98=EF=BC=8C=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E7=94=A8=E6=88=B7=E4=BF=A1=E6=81=AF=E9=97=B4=E9=9A=94=E7=9A=84?= =?UTF-8?q?=E6=94=B6=E9=9B=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/main.py | 3 +- src/plugins/chat/message_buffer.py | 33 ++++++++++--- .../think_flow_chat/think_flow_chat.py | 7 ++- src/plugins/person_info/person_info.py | 47 +++++++++++++++++-- 4 files changed, 78 insertions(+), 12 deletions(-) diff --git a/src/main.py b/src/main.py index e3bbf38d1..932fbfcfe 100644 --- a/src/main.py +++ b/src/main.py @@ -56,8 +56,9 @@ class MainSystem: self.mood_manager.start_mood_update(update_interval=global_config.mood_update_interval) logger.success("情绪管理器启动成功") - # 检查并清除person_info冗余字段 + # 检查并清除person_info冗余字段,启动个人习惯推断 await person_info_manager.del_all_undefined_field() + # asyncio.create_task(person_info_manager.personal_habit_deduction()) # 启动愿望管理器 await willing_manager.ensure_started() diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index 4e9aa5582..9919e6cf7 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -3,11 +3,13 @@ from src.common.logger import get_module_logger import asyncio from dataclasses import dataclass from .message import MessageRecv +from ..message.message_base import BaseMessageInfo import hashlib from typing import Dict from dataclasses import dataclass, field from collections import OrderedDict import random +import time logger = 
get_module_logger("message_buffer") @@ -40,17 +42,15 @@ class MassageBuffer: self.buffer_pool[person_id_] = OrderedDict() # 查找最近的处理成功消息(T) - last_T_msg = None recent_F_count = 0 for msg_id in reversed(self.buffer_pool[person_id_]): msg = self.buffer_pool[person_id_][msg_id] if msg.result == "T": - last_T_msg = msg break elif msg.result == "F": recent_F_count += 1 - # 判断条件:最近T之后有超过3条F + # 判断条件:最近T之后有超过3-5条F if (recent_F_count >= random.randint(3, 5)): new_msg = CacheMessages(message=message, result="T") new_msg.cache_determination.set() @@ -63,7 +63,7 @@ class MassageBuffer: if cache_msg.result == "U": cache_msg.result = "F" cache_msg.cache_determination.set() - logger.debug(f"被新消息覆盖信息id: {message.message_info.message_id}") + logger.debug(f"被新消息覆盖信息id: {cache_msg.message.message_info.message_id}") # 添加新消息 self.buffer_pool[person_id_][message.message_info.message_id] = CacheMessages(message=message) @@ -71,6 +71,7 @@ class MassageBuffer: # 启动3秒缓冲计时器 person_id = person_info_manager.get_person_id(message.message_info.user_info.platform, message.message_info.user_info.user_id) + asyncio.create_task(self.save_message_interval(person_id, message.message_info)) asyncio.create_task(self._debounce_processor(person_id_, message.message_info.message_id, person_id)) @@ -121,22 +122,26 @@ class MassageBuffer: keep_msgs = OrderedDict() combined_text = [] found = False + is_text = False for msg_id, msg in self.buffer_pool[person_id_].items(): if msg_id == message.message_info.message_id: found = True + is_text = msg.message.message_segment.type == "text" combined_text.append(msg.message.processed_plain_text) continue if found: keep_msgs[msg_id] = msg elif msg.result == "F": # 收集F消息的文本内容 - if hasattr(msg.message, 'processed_plain_text') and msg.message.processed_plain_text: + if (hasattr(msg.message, 'processed_plain_text') + and msg.message.message_segment.type == "text" + and msg.message.processed_plain_text): combined_text.append(msg.message.processed_plain_text) elif msg.result == "U": logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}") # 更新当前消息的processed_plain_text - if combined_text and combined_text[0] != message.processed_plain_text: + if combined_text and combined_text[0] != message.processed_plain_text and is_text: message.processed_plain_text = "".join(combined_text) logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") @@ -145,6 +150,22 @@ class MassageBuffer: except asyncio.TimeoutError: logger.debug(f"查询超时消息id: {message.message_info.message_id}") return False + + async def save_message_interval(self, person_id:str, message:BaseMessageInfo): + message_interval_list = await person_info_manager.get_value(person_id, "msg_interval_list") + now_time_ms = int(round(time.time() * 1000)) + if len(message_interval_list) < 1000: + message_interval_list.append(now_time_ms) + else: + message_interval_list = message_interval_list.pop(0) + message_interval_list.append(now_time_ms) + data = { + "platform" : message.platform, + "user_id" : message.user_info.user_id, + "nickname" : message.user_info.user_nickname, + "konw_time" : int(time.time()) + } + await person_info_manager.update_one_field(person_id, "msg_interval_list", message_interval_list, data) message_buffer = MassageBuffer() \ No newline at end of file diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index de034e25b..8f5322e22 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ 
b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ -198,7 +198,12 @@ class ThinkFlowChat: # 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text buffer_result = await message_buffer.query_buffer_result(message) if not buffer_result: - logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") + if message.message_segment.type == "text": + logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") + elif message.message_segment.type == "image": + logger.info(f"触发缓冲,已炸飞表情包/图片") + elif message.message_segment.type == "seglist": + logger.info(f"触发缓冲,已炸飞消息列") return is_mentioned = is_mentioned_bot_in_message(message) diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 3373366a0..20ab2db8b 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -4,6 +4,7 @@ import copy import hashlib from typing import Any, Callable, Dict, TypeVar T = TypeVar('T') # 泛型类型 +import datetime """ PersonInfoManager 类方法功能摘要: @@ -15,6 +16,7 @@ PersonInfoManager 类方法功能摘要: 6. get_values - 批量获取字段值(任一字段无效则返回空字典) 7. del_all_undefined_field - 清理全集合中未定义的字段 8. get_specific_value_list - 根据指定条件,返回person_id,value字典 +9. personal_habit_deduction - 定时推断个人习惯 """ logger = get_module_logger("person_info") @@ -30,11 +32,13 @@ person_info_default = { # "impression" : None, # "gender" : Unkown, "konw_time" : 0, - "msg_interval": 3000 + "msg_interval": 3000, + "msg_interval_list": [] } # 个人信息的各项与默认值在此定义,以下处理会自动创建/补全每一项 class PersonInfoManager: def __init__(self): + self.start_time = datetime.datetime.now() if "person_info" not in db.list_collection_names(): db.create_collection("person_info") db.person_info.create_index("person_id", unique=True) @@ -109,8 +113,9 @@ class PersonInfoManager: if document and field_name in document: return document[field_name] else: - logger.debug(f"获取{person_id}的{field_name}失败,已返回默认值{person_info_default[field_name]}") - return person_info_default[field_name] + default_value = copy.deepcopy(person_info_default[field_name]) + logger.debug(f"获取{person_id}的{field_name}失败,已返回默认值{default_value}") + return default_value async def get_values(self, person_id: str, field_names: list) -> dict: """获取指定person_id文档的多个字段值,若不存在该字段,则返回该字段的全局默认值""" @@ -134,7 +139,10 @@ class PersonInfoManager: result = {} for field in field_names: - result[field] = document.get(field, person_info_default[field]) if document else person_info_default[field] + result[field] = copy.deepcopy( + document.get(field, person_info_default[field]) + if document else person_info_default[field] + ) return result @@ -210,5 +218,36 @@ class PersonInfoManager: except Exception as e: logger.error(f"数据库查询失败: {str(e)}", exc_info=True) return {} + + async def personal_habit_deduction(self): + """启动个人信息推断,每天根据一定条件推断一次""" + try: + logger.info(f"个人信息推断启动: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}") + # # 初始化日程 + # await self.check_and_create_today_schedule() + # self.print_schedule() + + # while True: + # # print(self.get_current_num_task(1, True)) + + # current_time = datetime.datetime.now() + + # # 检查是否需要重新生成日程(日期变化) + # if current_time.date() != self.start_time.date(): + # logger.info("检测到日期变化,重新生成日程") + # self.start_time = current_time + # await self.check_and_create_today_schedule() + # self.print_schedule() + + # # 执行当前活动 + # # mind_thinking = heartflow.current_state.current_mind + + # await self.move_doing() + + # await asyncio.sleep(self.schedule_doing_update_interval) + + except Exception as e: + logger.error(f"个人信息推断运行时出错: {str(e)}") + logger.exception("详细错误信息:") 
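The buffering introduced in patches 01-03 is essentially a per-user debounce: every new message supersedes that user's previous undecided one, and only a message that survives a quiet interval is marked as processable. Below is a minimal standalone sketch of that pattern; the names (DebounceBuffer, Pending, push) are illustrative and not the project's API, and the fixed quiet period stands in for the per-user msg_interval the later patches infer.

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class Pending:
    text: str
    decided: asyncio.Event = field(default_factory=asyncio.Event)
    result: str = "U"  # U = undecided, T = process it, F = superseded


class DebounceBuffer:
    def __init__(self, quiet_seconds: float = 3.0):
        self.quiet = quiet_seconds
        self.pending: dict[str, Pending] = {}  # one slot per user
        self.lock = asyncio.Lock()

    async def push(self, user: str, text: str) -> Pending:
        async with self.lock:
            old = self.pending.get(user)
            if old and old.result == "U":  # a newer message supersedes it
                old.result = "F"
                old.decided.set()
            msg = Pending(text)
            self.pending[user] = msg
        asyncio.create_task(self._debounce(user, msg))
        return msg

    async def _debounce(self, user: str, msg: Pending):
        await asyncio.sleep(self.quiet)  # wait for a quiet period
        async with self.lock:
            if self.pending.get(user) is msg and msg.result == "U":
                msg.result = "T"  # survived the quiet period: process it
                msg.decided.set()


async def main():
    buf = DebounceBuffer(quiet_seconds=0.5)
    first = await buf.push("alice", "hel")
    await asyncio.sleep(0.1)
    second = await buf.push("alice", "hello there")  # arrives before the quiet period ends
    await asyncio.gather(first.decided.wait(), second.decided.wait())
    print(first.result, second.result)  # F T


asyncio.run(main())
```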
person_info_manager = PersonInfoManager() \ No newline at end of file From 87f3fc7b33daebcd3c9335a7a37a4c3794e2f79a Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 14:22:26 +0800 Subject: [PATCH 04/51] =?UTF-8?q?=E6=9B=B4=E6=94=B9=E5=A4=84=E7=90=86?= =?UTF-8?q?=E4=B8=AA=E4=BA=BA=E5=A4=8D=E5=90=88=E6=B6=88=E6=81=AF=E7=9A=84?= =?UTF-8?q?=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index 9919e6cf7..a7be8abcd 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -122,11 +122,11 @@ class MassageBuffer: keep_msgs = OrderedDict() combined_text = [] found = False - is_text = False + type = "text" for msg_id, msg in self.buffer_pool[person_id_].items(): if msg_id == message.message_info.message_id: found = True - is_text = msg.message.message_segment.type == "text" + type = msg.message.message_segment.type combined_text.append(msg.message.processed_plain_text) continue if found: @@ -141,9 +141,15 @@ class MassageBuffer: logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}") # 更新当前消息的processed_plain_text - if combined_text and combined_text[0] != message.processed_plain_text and is_text: - message.processed_plain_text = "".join(combined_text) - logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") + if combined_text and combined_text[0] != message.processed_plain_text: + if type == "text": + message.processed_plain_text = "".join(combined_text) + logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") + elif type == "image": + combined_text.pop() + message.processed_plain_text = "".join(combined_text) + message.is_emoji = False + logger.debug(f"整合了{len(combined_text)-1}条F消息的内容,覆盖当前image消息") self.buffer_pool[person_id_] = keep_msgs return result From 2746072edbe5f4dbcb9fee11a813b49549a9618f Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 16:54:58 +0800 Subject: [PATCH 05/51] =?UTF-8?q?=E5=AE=8C=E6=88=90=E6=B6=88=E6=81=AF?= =?UTF-8?q?=E9=97=B4=E9=9A=94=E6=8E=A8=E6=96=AD=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/main.py | 2 +- src/plugins/chat/message_buffer.py | 2 +- src/plugins/person_info/person_info.py | 70 +++++++++++++++++++------- 3 files changed, 54 insertions(+), 20 deletions(-) diff --git a/src/main.py b/src/main.py index 932fbfcfe..9ab75f46a 100644 --- a/src/main.py +++ b/src/main.py @@ -58,7 +58,7 @@ class MainSystem: # 检查并清除person_info冗余字段,启动个人习惯推断 await person_info_manager.del_all_undefined_field() - # asyncio.create_task(person_info_manager.personal_habit_deduction()) + asyncio.create_task(person_info_manager.personal_habit_deduction()) # 启动愿望管理器 await willing_manager.ensure_started() diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index a7be8abcd..c1bac3b14 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -163,7 +163,7 @@ class MassageBuffer: if len(message_interval_list) < 1000: message_interval_list.append(now_time_ms) else: - message_interval_list = message_interval_list.pop(0) + message_interval_list.pop(0) message_interval_list.append(now_time_ms) data = { "platform" : message.platform, diff --git a/src/plugins/person_info/person_info.py 
b/src/plugins/person_info/person_info.py index 20ab2db8b..7df27b632 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -5,6 +5,11 @@ import hashlib from typing import Any, Callable, Dict, TypeVar T = TypeVar('T') # 泛型类型 import datetime +import asyncio +import numpy + +import matplotlib.pyplot as plt +from pathlib import Path """ PersonInfoManager 类方法功能摘要: @@ -38,7 +43,6 @@ person_info_default = { class PersonInfoManager: def __init__(self): - self.start_time = datetime.datetime.now() if "person_info" not in db.list_collection_names(): db.create_collection("person_info") db.person_info.create_index("person_id", unique=True) @@ -222,29 +226,59 @@ class PersonInfoManager: async def personal_habit_deduction(self): """启动个人信息推断,每天根据一定条件推断一次""" try: - logger.info(f"个人信息推断启动: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}") - # # 初始化日程 - # await self.check_and_create_today_schedule() - # self.print_schedule() + while(1): + await asyncio.sleep(60) + current_time = datetime.datetime.now() + logger.info(f"个人信息推断启动: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") - # while True: - # # print(self.get_current_num_task(1, True)) + # "msg_interval"推断 + msg_interval_lists = await self.get_specific_value_list( + "msg_interval_list", + lambda x: isinstance(x, list) and len(x) >= 100 + ) + for person_id, msg_interval_list_ in msg_interval_lists.items(): + try: + time_interval = [] + for t1, t2 in zip(msg_interval_list_, msg_interval_list_[1:]): + delta = t2 - t1 + if delta < 6000: # 小于6秒 + time_interval.append(delta) - # current_time = datetime.datetime.now() + if len(time_interval) > 30: + time_interval.sort() - # # 检查是否需要重新生成日程(日期变化) - # if current_time.date() != self.start_time.date(): - # logger.info("检测到日期变化,重新生成日程") - # self.start_time = current_time - # await self.check_and_create_today_schedule() - # self.print_schedule() + # 画图(log) + log_dir = Path("logs/person_info") + log_dir.mkdir(parents=True, exist_ok=True) + plt.figure(figsize=(10, 6)) - # # 执行当前活动 - # # mind_thinking = heartflow.current_state.current_mind + plt.hist(time_interval, bins=30, density=True, alpha=0.5, color='g') - # await self.move_doing() + plt.grid(True, alpha=0.3) - # await asyncio.sleep(self.schedule_doing_update_interval) + plt.title(f"Message Interval Density (User: {person_id[:8]}...)") + plt.xlabel("Interval (ms)") + plt.ylabel("Density") + + img_path = log_dir / f"interval_density_{person_id[:8]}.png" + plt.savefig(img_path) + plt.close() + logger.info(f"已保存分布图到: {img_path}") + # 画图 + + filtered_intervals = [t for t in time_interval if t >= 500] + if len(filtered_intervals) > 25: + msg_interval = numpy.percentile(filtered_intervals, 90) + await self.update_one_field(person_id, "msg_interval", int(msg_interval)) + logger.debug(f"用户{person_id}的msg_interval已经被更新为{msg_interval}") + except Exception as e: + logger.debug(f"处理用户{person_id}msg_interval推断时出错: {str(e)}") + continue + + # 其他... 
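The msg_interval inference in patch 05 reduces to: take consecutive timestamp deltas, discard implausible gaps, and use a high percentile of what remains as the user's typical follow-up interval. A small self-contained sketch of that calculation on synthetic data follows; the thresholds (6 s cap, 500 ms floor, 90th percentile, minimum sample count) mirror the patch, while the function name and the synthetic history are made up for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)


def infer_msg_interval(timestamps_ms: list[int]) -> int | None:
    """Estimate a user's typical follow-up gap (ms) from message timestamps."""
    deltas = [t2 - t1 for t1, t2 in zip(timestamps_ms, timestamps_ms[1:])
              if 0 < t2 - t1 < 6000]               # keep positive gaps under 6 s
    usable = [d for d in deltas if d >= 500]       # drop near-instant bursts
    if len(usable) <= 25:
        return None                                # not enough data to update anything
    return int(round(np.percentile(usable, 90)))   # 90th percentile as the estimate


# Synthetic history: ~100 messages mostly 0.8-2.5 s apart, plus two long pauses.
gaps = rng.integers(800, 2500, size=100).tolist() + [30_000, 45_000]
timestamps = np.cumsum([0] + gaps).tolist()
print(infer_msg_interval(timestamps))              # around 2300 for this data
```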
+ + logger.info(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") + await asyncio.sleep(86400) except Exception as e: logger.error(f"个人信息推断运行时出错: {str(e)}") From 280e488f713efca4c63eb036283801e92ef312f4 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 17:56:49 +0800 Subject: [PATCH 06/51] =?UTF-8?q?=E4=BC=98=E9=9B=85=E7=9A=84=E7=94=BB?= =?UTF-8?q?=E5=9B=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/person_info/person_info.py | 27 ++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 7df27b632..8f5949c5e 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -10,6 +10,7 @@ import numpy import matplotlib.pyplot as plt from pathlib import Path +import pandas as pd """ PersonInfoManager 类方法功能摘要: @@ -232,6 +233,7 @@ class PersonInfoManager: logger.info(f"个人信息推断启动: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") # "msg_interval"推断 + msg_interval_map = False msg_interval_lists = await self.get_specific_value_list( "msg_interval_list", lambda x: isinstance(x, list) and len(x) >= 100 @@ -241,35 +243,42 @@ class PersonInfoManager: time_interval = [] for t1, t2 in zip(msg_interval_list_, msg_interval_list_[1:]): delta = t2 - t1 - if delta < 6000: # 小于6秒 + if delta < 6000 and delta > 0: # 小于6秒 time_interval.append(delta) if len(time_interval) > 30: time_interval.sort() # 画图(log) + msg_interval_map = True log_dir = Path("logs/person_info") log_dir.mkdir(parents=True, exist_ok=True) plt.figure(figsize=(10, 6)) - plt.hist(time_interval, bins=30, density=True, alpha=0.5, color='g') + time_series = pd.Series(time_interval) - plt.grid(True, alpha=0.3) + # 绘制直方图 + plt.hist(time_series, bins=50, density=True, alpha=0.4, color='pink', label='Histogram') - plt.title(f"Message Interval Density (User: {person_id[:8]}...)") + # 绘制KDE曲线(使用相同的实际数据) + time_series.plot(kind='kde', color='mediumpurple', linewidth=1, label='Density') + + plt.grid(True, alpha=0.2) + plt.xlim(0, 6000) + plt.title(f"Message Interval Distribution (User: {person_id[:8]}...)") plt.xlabel("Interval (ms)") plt.ylabel("Density") + plt.legend(framealpha=0.9, facecolor='white') - img_path = log_dir / f"interval_density_{person_id[:8]}.png" + img_path = log_dir / f"interval_distribution_{person_id[:8]}.png" plt.savefig(img_path) plt.close() - logger.info(f"已保存分布图到: {img_path}") # 画图 filtered_intervals = [t for t in time_interval if t >= 500] if len(filtered_intervals) > 25: - msg_interval = numpy.percentile(filtered_intervals, 90) - await self.update_one_field(person_id, "msg_interval", int(msg_interval)) + msg_interval = int(round(numpy.percentile(filtered_intervals, 90))) + await self.update_one_field(person_id, "msg_interval", msg_interval) logger.debug(f"用户{person_id}的msg_interval已经被更新为{msg_interval}") except Exception as e: logger.debug(f"处理用户{person_id}msg_interval推断时出错: {str(e)}") @@ -277,6 +286,8 @@ class PersonInfoManager: # 其他... 
+ if msg_interval_map: + logger.info(f"已保存分布图到: logs/person_info") logger.info(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") await asyncio.sleep(86400) From 80759f3fa166891986612cd89e653940082209e2 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 18:00:12 +0800 Subject: [PATCH 07/51] =?UTF-8?q?=E9=80=82=E9=85=8D=E6=8E=A8=E7=90=86?= =?UTF-8?q?=E6=A8=A1=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../chat_module/reasoning_chat/reasoning_chat.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py index 0163a306e..677baac52 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py @@ -17,6 +17,7 @@ from ...message import UserInfo, Seg from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from ...chat.chat_stream import chat_manager from ...person_info.relationship_manager import relationship_manager +from ...chat.message_buffer import message_buffer # 定义日志配置 chat_config = LogConfig( @@ -143,6 +144,8 @@ class ReasoningChat: userinfo = message.message_info.user_info messageinfo = message.message_info + # 消息加入缓冲池 + await message_buffer.start_caching_messages(message) # logger.info("使用推理聊天模式") @@ -172,6 +175,17 @@ class ReasoningChat: timer2 = time.time() timing_results["记忆激活"] = timer2 - timer1 + # 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text + buffer_result = await message_buffer.query_buffer_result(message) + if not buffer_result: + if message.message_segment.type == "text": + logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") + elif message.message_segment.type == "image": + logger.info(f"触发缓冲,已炸飞表情包/图片") + elif message.message_segment.type == "seglist": + logger.info(f"触发缓冲,已炸飞消息列") + return + is_mentioned = is_mentioned_bot_in_message(message) # 计算回复意愿 From 132861d16b1de3c62e8e62474d7a8f4f564dff80 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 18:24:43 +0800 Subject: [PATCH 08/51] ruff --- src/plugins/chat/message_buffer.py | 8 ++++---- src/plugins/chat_module/reasoning_chat/reasoning_chat.py | 4 ++-- .../chat_module/think_flow_chat/think_flow_chat.py | 4 ++-- src/plugins/person_info/person_info.py | 3 +-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index c1bac3b14..d42fc9973 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -6,7 +6,7 @@ from .message import MessageRecv from ..message.message_base import BaseMessageInfo import hashlib from typing import Dict -from dataclasses import dataclass, field +from dataclasses import field from collections import OrderedDict import random import time @@ -20,7 +20,7 @@ class CacheMessages: result: str = "U" -class MassageBuffer: +class MessageBuffer: def __init__(self): self.buffer_pool: Dict[str, OrderedDict[str, CacheMessages]] = {} self.lock = asyncio.Lock() @@ -59,7 +59,7 @@ class MassageBuffer: return # 标记该用户之前的未处理消息 - for msg_id, cache_msg in self.buffer_pool[person_id_].items(): + for cache_msg in self.buffer_pool[person_id_].values: if cache_msg.result == "U": cache_msg.result = "F" cache_msg.cache_determination.set() @@ -174,4 +174,4 @@ class MassageBuffer: await person_info_manager.update_one_field(person_id, "msg_interval_list", 
message_interval_list, data) -message_buffer = MassageBuffer() \ No newline at end of file +message_buffer = MessageBuffer() \ No newline at end of file diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py index 677baac52..13f055185 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py @@ -181,9 +181,9 @@ class ReasoningChat: if message.message_segment.type == "text": logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") elif message.message_segment.type == "image": - logger.info(f"触发缓冲,已炸飞表情包/图片") + logger.info("触发缓冲,已炸飞表情包/图片") elif message.message_segment.type == "seglist": - logger.info(f"触发缓冲,已炸飞消息列") + logger.info("触发缓冲,已炸飞消息列") return is_mentioned = is_mentioned_bot_in_message(message) diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index 0f8d3298b..42e2b8e43 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ -204,9 +204,9 @@ class ThinkFlowChat: if message.message_segment.type == "text": logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}") elif message.message_segment.type == "image": - logger.info(f"触发缓冲,已炸飞表情包/图片") + logger.info("触发缓冲,已炸飞表情包/图片") elif message.message_segment.type == "seglist": - logger.info(f"触发缓冲,已炸飞消息列") + logger.info("触发缓冲,已炸飞消息列") return is_mentioned = is_mentioned_bot_in_message(message) diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 8f5949c5e..3cbccbe8a 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -3,15 +3,14 @@ from ...common.database import db import copy import hashlib from typing import Any, Callable, Dict, TypeVar -T = TypeVar('T') # 泛型类型 import datetime import asyncio import numpy - import matplotlib.pyplot as plt from pathlib import Path import pandas as pd + """ PersonInfoManager 类方法功能摘要: 1. get_person_id - 根据平台和用户ID生成MD5哈希的唯一person_id From 68014de2723f5df8e254d081437fe7576a7420ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=A6=E6=BA=AA=E7=95=94?= <130263765+na10xi27da@users.noreply.github.com> Date: Fri, 4 Apr 2025 18:27:10 +0800 Subject: [PATCH 09/51] ruff --- src/plugins/person_info/person_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 3cbccbe8a..9bb5408e4 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -2,7 +2,7 @@ from src.common.logger import get_module_logger from ...common.database import db import copy import hashlib -from typing import Any, Callable, Dict, TypeVar +from typing import Any, Callable, Dict import datetime import asyncio import numpy @@ -286,7 +286,7 @@ class PersonInfoManager: # 其他... 
if msg_interval_map: - logger.info(f"已保存分布图到: logs/person_info") + logger.info("已保存分布图到: logs/person_info") logger.info(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") await asyncio.sleep(86400) @@ -294,4 +294,4 @@ class PersonInfoManager: logger.error(f"个人信息推断运行时出错: {str(e)}") logger.exception("详细错误信息:") -person_info_manager = PersonInfoManager() \ No newline at end of file +person_info_manager = PersonInfoManager() From 9ba6c8dacd6c0f30112be241625976e692728c6e Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Fri, 4 Apr 2025 20:31:26 +0800 Subject: [PATCH 10/51] debug... --- src/plugins/chat/message_buffer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index d42fc9973..e139a122b 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -1,12 +1,11 @@ from ..person_info.person_info import person_info_manager from src.common.logger import get_module_logger import asyncio -from dataclasses import dataclass +from dataclasses import dataclass, field from .message import MessageRecv from ..message.message_base import BaseMessageInfo import hashlib from typing import Dict -from dataclasses import field from collections import OrderedDict import random import time @@ -59,7 +58,7 @@ class MessageBuffer: return # 标记该用户之前的未处理消息 - for cache_msg in self.buffer_pool[person_id_].values: + for cache_msg in self.buffer_pool[person_id_].values(): if cache_msg.result == "U": cache_msg.result = "F" cache_msg.cache_determination.set() From 72d1ff7cd91741161b33f2c0aeadaf4b7be9550c Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 4 Apr 2025 21:15:14 +0800 Subject: [PATCH 11/51] =?UTF-8?q?fix=EF=BC=9A=E5=B0=9D=E8=AF=95=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=A8=A1=E5=9E=8B=E8=AF=B7=E6=B1=82=E5=A4=B1=E8=B4=A5?= =?UTF-8?q?=E7=82=B8=E5=BE=AA=E7=8E=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/message/api.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/plugins/message/api.py b/src/plugins/message/api.py index a29ce429e..855ff8b95 100644 --- a/src/plugins/message/api.py +++ b/src/plugins/message/api.py @@ -29,7 +29,10 @@ class BaseMessageHandler: try: tasks.append(handler(message)) except Exception as e: - raise RuntimeError(str(e)) from e + logger.error(f"消息处理出错: {str(e)}") + logger.error(traceback.format_exc()) + # 不抛出异常,而是记录错误并继续处理其他消息 + continue if tasks: await asyncio.gather(*tasks, return_exceptions=True) From 34f9bc64b232475dd5792d77be162124245bcc5b Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 4 Apr 2025 21:21:45 +0800 Subject: [PATCH 12/51] =?UTF-8?q?fix=EF=BC=9APFC=E8=AF=B4=E5=87=BA?= =?UTF-8?q?=E4=B8=8D=E8=AF=9D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/PFC/pfc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py index 667a6f035..405ca02dc 100644 --- a/src/plugins/PFC/pfc.py +++ b/src/plugins/PFC/pfc.py @@ -821,7 +821,7 @@ class DirectMessageSender: if not end_point: raise ValueError(f"未找到平台:{chat_stream.platform} 的url配置") - await global_api.send_message(end_point, message_json) + await global_api.send_message_REST(end_point, message_json) # 存储消息 await self.storage.store_message(message, message.chat_stream) From d108a9ab2aaa8c808efe7b714d974aadf8d20e57 Mon Sep 17 00:00:00 2001 
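Patch 11's change to api.py follows the usual rule for fanning a message out to several async handlers: run them with asyncio.gather(return_exceptions=True) and log individual failures instead of letting one exception abort the whole dispatch. A generic, self-contained sketch of that pattern (the handler names here are invented for illustration):

```python
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("dispatch")


async def ok_handler(message: str):
    logger.info("handled: %s", message)


async def broken_handler(message: str):
    raise RuntimeError("this handler always fails")


async def dispatch(message: str, handlers):
    # Run every handler; a failure in one must not abort the others.
    results = await asyncio.gather(*(h(message) for h in handlers),
                                   return_exceptions=True)
    for handler, result in zip(handlers, results):
        if isinstance(result, BaseException):
            logger.error("handler %s failed: %r", handler.__name__, result)


asyncio.run(dispatch("hello", [ok_handler, broken_handler, ok_handler]))
```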
From: SengokuCola <1026294844@qq.com> Date: Fri, 4 Apr 2025 21:26:48 +0800 Subject: [PATCH 13/51] Update bot.py --- src/plugins/chat/bot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py index 68afd2e76..cd5b758f7 100644 --- a/src/plugins/chat/bot.py +++ b/src/plugins/chat/bot.py @@ -96,11 +96,11 @@ class ChatBot: await self._create_PFC_chat(message) else: if groupinfo.group_id in global_config.talk_allowed_groups: - logger.debug(f"开始群聊模式{message_data}") + logger.debug(f"开始群聊模式{str(message_data)[:50]}...") if global_config.response_mode == "heart_flow": await self.think_flow_chat.process_message(message_data) elif global_config.response_mode == "reasoning": - logger.debug(f"开始推理模式{message_data}") + logger.debug(f"开始推理模式{str(message_data)[:50]}...") await self.reasoning_chat.process_message(message_data) else: logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}") From 9d74e28e1b054b0be3dcf804538df9293c5a0717 Mon Sep 17 00:00:00 2001 From: undefined Date: Fri, 4 Apr 2025 16:41:36 +0100 Subject: [PATCH 14/51] flake: add scipy to dependencies scipy relies on libstdc++.so and cannot be used directly with venv in nix environment. adding it to buildInputs solves this issue. --- flake.nix | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/flake.nix b/flake.nix index 404f7555c..23b82bb77 100644 --- a/flake.nix +++ b/flake.nix @@ -18,10 +18,11 @@ devShells.default = pkgs.mkShell { name = "python-venv"; venvDir = "./.venv"; - buildInputs = [ - pythonPackages.python - pythonPackages.venvShellHook - pythonPackages.numpy + buildInputs = with pythonPackages; [ + python + venvShellHook + scipy + numpy ]; postVenvCreation = '' @@ -35,4 +36,4 @@ ''; }; }); -} \ No newline at end of file +} From 6e8953437647478cfdb1a5a7fba9848fbc8892d2 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 00:11:28 +0800 Subject: [PATCH 15/51] =?UTF-8?q?fix=EF=BC=9Apfc=E5=A4=9A=E9=87=8D?= =?UTF-8?q?=E5=AD=98=E5=9C=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/PFC/chat_observer.py | 9 + src/plugins/PFC/pfc.py | 250 ++++++++---------- src/plugins/PFC/pfc_utils.py | 72 +++++ src/plugins/chat/bot.py | 15 +- .../reasoning_prompt_builder.py | 3 +- .../think_flow_prompt_builder.py | 2 +- 6 files changed, 204 insertions(+), 147 deletions(-) create mode 100644 src/plugins/PFC/pfc_utils.py diff --git a/src/plugins/PFC/chat_observer.py b/src/plugins/PFC/chat_observer.py index 4fa6951e2..6781145b6 100644 --- a/src/plugins/PFC/chat_observer.py +++ b/src/plugins/PFC/chat_observer.py @@ -57,6 +57,15 @@ class ChatObserver: self._update_event = asyncio.Event() # 触发更新的事件 self._update_complete = asyncio.Event() # 更新完成的事件 + def check(self) -> bool: + """检查距离上一次观察之后是否有了新消息 + + Returns: + bool: 是否有新消息 + """ + return self.new_message_after(self.last_check_time) + + def new_message_after(self, time_point: float) -> bool: """判断是否在指定时间点后有新消息 diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py index 405ca02dc..db92fd80a 100644 --- a/src/plugins/PFC/pfc.py +++ b/src/plugins/PFC/pfc.py @@ -17,6 +17,7 @@ from ..storage.storage import MessageStorage from .chat_observer import ChatObserver from .pfc_KnowledgeFetcher import KnowledgeFetcher from .reply_checker import ReplyChecker +from .pfc_utils import get_items_from_json import json import time @@ -128,43 +129,18 @@ judge_conversation: 判断对话是否结束,当发现对话目标已经达到 content, _ = await 
self.llm.generate_response_async(prompt) logger.debug(f"LLM原始返回内容: {content}") - # 清理内容,尝试提取JSON部分 - content = content.strip() - try: - # 尝试直接解析 - result = json.loads(content) - except json.JSONDecodeError: - # 如果直接解析失败,尝试查找和提取JSON部分 - import re - json_pattern = r'\{[^{}]*\}' - json_match = re.search(json_pattern, content) - if json_match: - try: - result = json.loads(json_match.group()) - except json.JSONDecodeError: - logger.error("提取的JSON内容解析失败,返回默认行动") - return "direct_reply", "JSON解析失败,选择直接回复" - else: - # 如果找不到JSON,尝试从文本中提取行动和原因 - if "direct_reply" in content.lower(): - return "direct_reply", "从文本中提取的行动" - elif "fetch_knowledge" in content.lower(): - return "fetch_knowledge", "从文本中提取的行动" - elif "wait" in content.lower(): - return "wait", "从文本中提取的行动" - elif "listening" in content.lower(): - return "listening", "从文本中提取的行动" - elif "rethink_goal" in content.lower(): - return "rethink_goal", "从文本中提取的行动" - elif "judge_conversation" in content.lower(): - return "judge_conversation", "从文本中提取的行动" - else: - logger.error("无法从返回内容中提取行动类型") - return "direct_reply", "无法解析响应,选择直接回复" + # 使用简化函数提取JSON内容 + success, result = get_items_from_json( + content, + "action", "reason", + default_values={"action": "direct_reply", "reason": "默认原因"} + ) - # 验证JSON字段 - action = result.get("action", "direct_reply") - reason = result.get("reason", "默认原因") + if not success: + return "direct_reply", "JSON解析失败,选择直接回复" + + action = result["action"] + reason = result["reason"] # 验证action类型 if action not in ["direct_reply", "fetch_knowledge", "wait", "listening", "rethink_goal", "judge_conversation"]: @@ -195,6 +171,8 @@ class GoalAnalyzer: self.name = global_config.BOT_NICKNAME self.nick_name = global_config.BOT_ALIAS_NAMES self.chat_observer = ChatObserver.get_instance(stream_id) + + self.current_goal_and_reason = None async def analyze_goal(self) -> Tuple[str, str, str]: """分析对话历史并设定目标 @@ -239,48 +217,20 @@ class GoalAnalyzer: content, _ = await self.llm.generate_response_async(prompt) logger.debug(f"LLM原始返回内容: {content}") - # 清理和验证返回内容 - if not content or not isinstance(content, str): - logger.error("LLM返回内容为空或格式不正确") - continue - - # 尝试提取JSON部分 - content = content.strip() - try: - # 尝试直接解析 - result = json.loads(content) - except json.JSONDecodeError: - # 如果直接解析失败,尝试查找和提取JSON部分 - import re - json_pattern = r'\{[^{}]*\}' - json_match = re.search(json_pattern, content) - if json_match: - try: - result = json.loads(json_match.group()) - except json.JSONDecodeError: - logger.error(f"提取的JSON内容解析失败,重试第{retry + 1}次") - continue - else: - logger.error(f"无法在返回内容中找到有效的JSON,重试第{retry + 1}次") - continue + # 使用简化函数提取JSON内容 + success, result = get_items_from_json( + content, + "goal", "reasoning", + required_types={"goal": str, "reasoning": str} + ) - # 验证JSON字段 - if not all(key in result for key in ["goal", "reasoning"]): - logger.error(f"JSON缺少必要字段,实际内容: {result},重试第{retry + 1}次") + if not success: + logger.error(f"无法解析JSON,重试第{retry + 1}次") continue goal = result["goal"] reasoning = result["reasoning"] - # 验证字段内容 - if not isinstance(goal, str) or not isinstance(reasoning, str): - logger.error(f"JSON字段类型错误,goal和reasoning必须是字符串,重试第{retry + 1}次") - continue - - if not goal.strip() or not reasoning.strip(): - logger.error(f"JSON字段内容为空,重试第{retry + 1}次") - continue - # 使用默认的方法 method = "以友好的态度回应" return goal, method, reasoning @@ -330,58 +280,21 @@ class GoalAnalyzer: content, _ = await self.llm.generate_response_async(prompt) logger.debug(f"LLM原始返回内容: {content}") - # 清理和验证返回内容 - if not content or not isinstance(content, str): - 
logger.error("LLM返回内容为空或格式不正确") - return False, False, "确保对话顺利进行" - - # 尝试提取JSON部分 - content = content.strip() - try: - # 尝试直接解析 - result = json.loads(content) - except json.JSONDecodeError: - # 如果直接解析失败,尝试查找和提取JSON部分 - import re - json_pattern = r'\{[^{}]*\}' - json_match = re.search(json_pattern, content) - if json_match: - try: - result = json.loads(json_match.group()) - except json.JSONDecodeError as e: - logger.error(f"提取的JSON内容解析失败: {e}") - return False, False, "确保对话顺利进行" - else: - logger.error("无法在返回内容中找到有效的JSON") - return False, False, "确保对话顺利进行" + # 使用简化函数提取JSON内容 + success, result = get_items_from_json( + content, + "goal_achieved", "stop_conversation", "reason", + required_types={ + "goal_achieved": bool, + "stop_conversation": bool, + "reason": str + } + ) - # 验证JSON字段 - if not all(key in result for key in ["goal_achieved", "stop_conversation", "reason"]): - logger.error(f"JSON缺少必要字段,实际内容: {result}") - return False, False, "确保对话顺利进行" - - goal_achieved = result["goal_achieved"] - stop_conversation = result["stop_conversation"] - reason = result["reason"] - - # 验证字段类型 - if not isinstance(goal_achieved, bool): - logger.error("goal_achieved 必须是布尔值") - return False, False, "确保对话顺利进行" - - if not isinstance(stop_conversation, bool): - logger.error("stop_conversation 必须是布尔值") - return False, False, "确保对话顺利进行" - - if not isinstance(reason, str): - logger.error("reason 必须是字符串") - return False, False, "确保对话顺利进行" - - if not reason.strip(): - logger.error("reason 不能为空") + if not success: return False, False, "确保对话顺利进行" - return goal_achieved, stop_conversation, reason + return result["goal_achieved"], result["stop_conversation"], result["reason"] except Exception as e: logger.error(f"分析对话目标时出错: {str(e)}") @@ -536,25 +449,66 @@ class ReplyGenerator: class Conversation: # 类级别的实例管理 _instances: Dict[str, 'Conversation'] = {} + _instance_lock = asyncio.Lock() # 类级别的全局锁 + _init_events: Dict[str, asyncio.Event] = {} # 初始化完成事件 + _initializing: Dict[str, bool] = {} # 标记是否正在初始化 @classmethod - def get_instance(cls, stream_id: str) -> 'Conversation': - """获取或创建对话实例""" - if stream_id not in cls._instances: - cls._instances[stream_id] = cls(stream_id) - logger.info(f"创建新的对话实例: {stream_id}") - return cls._instances[stream_id] + async def get_instance(cls, stream_id: str) -> Optional['Conversation']: + """获取或创建对话实例 + + Args: + stream_id: 聊天流ID + + Returns: + Optional[Conversation]: 对话实例,如果创建或等待失败则返回None + """ + try: + # 使用全局锁来确保线程安全 + async with cls._instance_lock: + # 如果已经在初始化中,等待初始化完成 + if stream_id in cls._initializing and cls._initializing[stream_id]: + # 释放锁等待初始化 + cls._instance_lock.release() + try: + await asyncio.wait_for(cls._init_events[stream_id].wait(), timeout=5.0) + except asyncio.TimeoutError: + logger.error(f"等待实例 {stream_id} 初始化超时") + return None + finally: + await cls._instance_lock.acquire() + + # 如果实例不存在,创建新实例 + if stream_id not in cls._instances: + cls._instances[stream_id] = cls(stream_id) + cls._init_events[stream_id] = asyncio.Event() + cls._initializing[stream_id] = True + logger.info(f"创建新的对话实例: {stream_id}") + + return cls._instances[stream_id] + except Exception as e: + logger.error(f"获取对话实例失败: {e}") + return None @classmethod - def remove_instance(cls, stream_id: str): - """删除对话实例""" - if stream_id in cls._instances: - # 停止相关组件 - instance = cls._instances[stream_id] - instance.chat_observer.stop() - # 删除实例 - del cls._instances[stream_id] - logger.info(f"已删除对话实例 {stream_id}") + async def remove_instance(cls, stream_id: str): + """删除对话实例 + + Args: + stream_id: 聊天流ID + """ + async 
with cls._instance_lock: + if stream_id in cls._instances: + # 停止相关组件 + instance = cls._instances[stream_id] + instance.chat_observer.stop() + # 删除实例 + del cls._instances[stream_id] + if stream_id in cls._init_events: + del cls._init_events[stream_id] + if stream_id in cls._initializing: + del cls._initializing[stream_id] + logger.info(f"已删除对话实例 {stream_id}") def __init__(self, stream_id: str): """初始化对话系统""" @@ -592,13 +546,21 @@ class Conversation: async def start(self): """开始对话流程""" - logger.info("对话系统启动") - self.should_continue = True - self.chat_observer.start() # 启动观察器 - await asyncio.sleep(1) - # 启动对话循环 - await self._conversation_loop() - + try: + logger.info("对话系统启动") + self.should_continue = True + self.chat_observer.start() # 启动观察器 + await asyncio.sleep(1) + # 启动对话循环 + await self._conversation_loop() + except Exception as e: + logger.error(f"启动对话系统失败: {e}") + raise + finally: + # 标记初始化完成 + self._init_events[self.stream_id].set() + self._initializing[self.stream_id] = False + async def _conversation_loop(self): """对话循环""" # 获取最近的消息历史 @@ -724,7 +686,7 @@ class Conversation: self.should_continue = False self.state = ConversationState.ENDED # 删除实例(这会同时停止chat_observer) - self.remove_instance(self.stream_id) + await self.remove_instance(self.stream_id) async def _send_timeout_message(self): """发送超时结束消息""" diff --git a/src/plugins/PFC/pfc_utils.py b/src/plugins/PFC/pfc_utils.py new file mode 100644 index 000000000..2b94e6c4d --- /dev/null +++ b/src/plugins/PFC/pfc_utils.py @@ -0,0 +1,72 @@ +import json +import re +from typing import Dict, Any, Optional, List, Tuple, Union +from src.common.logger import get_module_logger + +logger = get_module_logger("pfc_utils") + +def get_items_from_json( + content: str, + *items: str, + default_values: Optional[Dict[str, Any]] = None, + required_types: Optional[Dict[str, type]] = None +) -> Tuple[bool, Dict[str, Any]]: + """从文本中提取JSON内容并获取指定字段 + + Args: + content: 包含JSON的文本 + *items: 要提取的字段名 + default_values: 字段的默认值,格式为 {字段名: 默认值} + required_types: 字段的必需类型,格式为 {字段名: 类型} + + Returns: + Tuple[bool, Dict[str, Any]]: (是否成功, 提取的字段字典) + """ + content = content.strip() + result = {} + + # 设置默认值 + if default_values: + result.update(default_values) + + # 尝试解析JSON + try: + json_data = json.loads(content) + except json.JSONDecodeError: + # 如果直接解析失败,尝试查找和提取JSON部分 + json_pattern = r'\{[^{}]*\}' + json_match = re.search(json_pattern, content) + if json_match: + try: + json_data = json.loads(json_match.group()) + except json.JSONDecodeError: + logger.error("提取的JSON内容解析失败") + return False, result + else: + logger.error("无法在返回内容中找到有效的JSON") + return False, result + + # 提取字段 + for item in items: + if item in json_data: + result[item] = json_data[item] + + # 验证必需字段 + if not all(item in result for item in items): + logger.error(f"JSON缺少必要字段,实际内容: {json_data}") + return False, result + + # 验证字段类型 + if required_types: + for field, expected_type in required_types.items(): + if field in result and not isinstance(result[field], expected_type): + logger.error(f"{field} 必须是 {expected_type.__name__} 类型") + return False, result + + # 验证字符串字段不为空 + for field in items: + if isinstance(result[field], str) and not result[field].strip(): + logger.error(f"{field} 不能为空") + return False, result + + return True, result \ No newline at end of file diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py index cd5b758f7..cfdfbdb32 100644 --- a/src/plugins/chat/bot.py +++ b/src/plugins/chat/bot.py @@ -42,11 +42,24 @@ class ChatBot: if global_config.enable_pfc_chatting: # 获取或创建对话实例 - 
conversation = Conversation.get_instance(chat_id) + conversation = await Conversation.get_instance(chat_id) + if conversation is None: + logger.error(f"创建或获取对话实例失败: {chat_id}") + return + # 如果是新创建的实例,启动对话系统 if conversation.state == ConversationState.INIT: asyncio.create_task(conversation.start()) logger.info(f"为聊天 {chat_id} 创建新的对话实例") + elif conversation.state == ConversationState.ENDED: + # 如果实例已经结束,重新创建 + await Conversation.remove_instance(chat_id) + conversation = await Conversation.get_instance(chat_id) + if conversation is None: + logger.error(f"重新创建对话实例失败: {chat_id}") + return + asyncio.create_task(conversation.start()) + logger.info(f"为聊天 {chat_id} 重新创建对话实例") except Exception as e: logger.error(f"创建PFC聊天流失败: {e}") diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py index e3015fe1e..af18fe6ae 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py @@ -142,12 +142,13 @@ class PromptBuilder: logger.info("开始构建prompt") prompt = f""" +{relation_prompt_all} {memory_prompt} {prompt_info} {schedule_prompt} {chat_target} {chat_talking_prompt} -现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。{relation_prompt_all}\n +现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality}。 你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},然后给出日常且口语化的回复,平淡一些, 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger} diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py index 3cd6096e7..d79878258 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py @@ -123,7 +123,7 @@ class PromptBuilder: {chat_talking_prompt} 你刚刚脑子里在想: {current_mind_info} -现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。{relation_prompt_all}\n +现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality}。 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些, 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger} From 060e8ce6c7fe87bb2ebbb31404fdb09f66144ef7 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 00:12:15 +0800 Subject: [PATCH 16/51] Update config.py --- src/plugins/config/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py index c1bb35dbc..680c2bafb 100644 --- a/src/plugins/config/config.py +++ b/src/plugins/config/config.py @@ -25,8 +25,8 @@ config_config = LogConfig( logger = get_module_logger("config", config=config_config) #考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 -mai_version_main = "0.6.0" -mai_version_fix = "" +mai_version_main = "0.6.1" +mai_version_fix = "snapshot-1" if mai_version_fix: mai_version = f"{mai_version_main}-{mai_version_fix}" else: From 449ac12145d1fdb0e3052c3d91c53aa43f8e47d0 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 00:58:01 +0800 Subject: [PATCH 17/51] =?UTF-8?q?fix=EF=BC=9Apfc=E4=BC=98=E5=8C=96?= =?UTF-8?q?=EF=BC=8C=E4=BC=9A=E6=A3=80=E6=9F=A5=E6=96=B0=E6=B6=88=E6=81=AF?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/PFC/chat_observer.py | 25 +++++- src/plugins/PFC/pfc.py | 131 +++++++++++++++++++++++-------- src/plugins/chat/chat_stream.py | 4 +- src/plugins/chat/message.py | 2 +- 4 files changed, 124 insertions(+), 38 deletions(-) diff --git a/src/plugins/PFC/chat_observer.py b/src/plugins/PFC/chat_observer.py index 6781145b6..532afc9db 100644 --- a/src/plugins/PFC/chat_observer.py +++ b/src/plugins/PFC/chat_observer.py @@ -1,6 +1,6 @@ import time import asyncio -from typing import Optional, Dict, Any, List +from typing import Optional, Dict, Any, List, Tuple from src.common.logger import get_module_logger from src.common.database import db from ..message.message_base import UserInfo @@ -63,8 +63,28 @@ class ChatObserver: Returns: bool: 是否有新消息 """ - return self.new_message_after(self.last_check_time) + logger.debug(f"检查距离上一次观察之后是否有了新消息: {self.last_check_time}") + + query = { + "chat_id": self.stream_id, + "time": {"$gt": self.last_check_time} + } + + # 只需要查询是否存在,不需要获取具体消息 + new_message_exists = db.messages.find_one(query) is not None + + if new_message_exists: + logger.debug("发现新消息") + self.last_check_time = time.time() + + return new_message_exists + def get_new_message(self) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: + """获取上一次观察的时间点后的新消息,插入到历史记录中,并返回新消息和历史记录两个对象""" + messages = self.get_message_history(self.last_check_time) + for message in messages: + self._add_message_to_history(message) + return messages, self.message_history def new_message_after(self, time_point: float) -> bool: """判断是否在指定时间点后有新消息 @@ -75,6 +95,7 @@ class ChatObserver: Returns: bool: 是否有新消息 """ + logger.debug(f"判断是否在指定时间点后有新消息: {self.last_message_time} > {time_point}") return self.last_message_time is None or self.last_message_time > time_point def _add_message_to_history(self, message: Dict[str, Any]): diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py index db92fd80a..e02409ce8 100644 --- a/src/plugins/PFC/pfc.py +++ b/src/plugins/PFC/pfc.py @@ -348,19 +348,18 @@ class ReplyGenerator: knowledge_cache: Dict[str, str], previous_reply: Optional[str] = None, retry_count: int = 0 - ) -> Tuple[str, bool]: + ) -> str: """生成回复 Args: goal: 对话目标 - method: 实现方式 chat_history: 聊天历史 knowledge_cache: 知识缓存 previous_reply: 上一次生成的回复(如果有) retry_count: 当前重试次数 Returns: - Tuple[str, bool]: (生成的回复, 是否需要重新规划) + str: 生成的回复 """ # 构建提示词 logger.debug(f"开始生成回复:当前目标: {goal}") @@ -421,29 +420,40 @@ class ReplyGenerator: try: content, _ = await self.llm.generate_response_async(prompt) logger.info(f"生成的回复: {content}") + is_new = self.chat_observer.check() + logger.debug(f"再看一眼聊天记录,{'有' if is_new else '没有'}新消息") - # 检查生成的回复是否合适 - is_suitable, reason, need_replan = await self.reply_checker.check( - content, goal, retry_count - ) - - if not is_suitable: - logger.warning(f"生成的回复不合适,原因: {reason}") - if need_replan: - logger.info("需要重新规划对话目标") - return "让我重新思考一下...", True - else: - # 递归调用,将当前回复作为previous_reply传入 - return await self.generate( - goal, chat_history, knowledge_cache, - content, retry_count + 1 - ) + # 如果有新消息,重新生成回复 + if is_new: + logger.info("检测到新消息,重新生成回复") + return await self.generate( + goal, chat_history, knowledge_cache, + None, retry_count + ) - return content, False + return content except Exception as e: logger.error(f"生成回复时出错: {e}") - return "抱歉,我现在有点混乱,让我重新思考一下...", True + return "抱歉,我现在有点混乱,让我重新思考一下..." 
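Note on the reworked generate() above: it now treats ChatObserver.check() as a watermark test — check() reports whether anything landed in db.messages after last_check_time and, when something did, advances that watermark, so an immediate second call returns False until another message arrives. A minimal in-memory sketch of that contract (an illustrative stand-in with explicit timestamps, not the project's ChatObserver, which runs a find_one against MongoDB as in the chat_observer.py hunk above):

    class WatermarkProbe:
        """Simplified stand-in for ChatObserver.check(); no database involved."""

        def __init__(self, now: float):
            self.last_check_time = now
            self._message_times: list[float] = []  # stand-in for the "time" field in db.messages

        def record_message(self, t: float) -> None:
            self._message_times.append(t)

        def check(self, now: float) -> bool:
            # True if anything arrived after the watermark; advance it when so.
            has_new = any(t > self.last_check_time for t in self._message_times)
            if has_new:
                self.last_check_time = now
            return has_new

    probe = WatermarkProbe(now=100.0)
    probe.record_message(t=101.0)
    assert probe.check(now=102.0) is True   # new message seen, watermark moves to 102.0
    assert probe.check(now=103.0) is False  # nothing newer than 102.0 yet

This is why generate() can safely re-check after the LLM call: a True result both signals the new message and resets the baseline for the recursive retry.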
+ + async def check_reply( + self, + reply: str, + goal: str, + retry_count: int = 0 + ) -> Tuple[bool, str, bool]: + """检查回复是否合适 + + Args: + reply: 生成的回复 + goal: 对话目标 + retry_count: 当前重试次数 + + Returns: + Tuple[bool, str, bool]: (是否合适, 原因, 是否需要重新规划) + """ + return await self.reply_checker.check(reply, goal, retry_count) class Conversation: @@ -620,17 +630,53 @@ class Conversation: if action == "direct_reply": self.state = ConversationState.GENERATING messages = self.chat_observer.get_message_history(limit=30) - self.generated_reply, need_replan = await self.reply_generator.generate( + self.generated_reply = await self.reply_generator.generate( self.current_goal, self.current_method, [self._convert_to_message(msg) for msg in messages], self.knowledge_cache ) - if need_replan: - self.state = ConversationState.RETHINKING - self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal() - else: - await self._send_reply() + + # 检查回复是否合适 + is_suitable, reason, need_replan = await self.reply_generator.check_reply( + self.generated_reply, + self.current_goal + ) + + if not is_suitable: + logger.warning(f"生成的回复不合适,原因: {reason}") + if need_replan: + self.state = ConversationState.RETHINKING + self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal() + return + else: + # 重新生成回复 + self.generated_reply = await self.reply_generator.generate( + self.current_goal, + self.current_method, + [self._convert_to_message(msg) for msg in messages], + self.knowledge_cache, + self.generated_reply # 将不合适的回复作为previous_reply传入 + ) + + while self.chat_observer.check(): + if not is_suitable: + logger.warning(f"生成的回复不合适,原因: {reason}") + if need_replan: + self.state = ConversationState.RETHINKING + self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal() + return + else: + # 重新生成回复 + self.generated_reply = await self.reply_generator.generate( + self.current_goal, + self.current_method, + [self._convert_to_message(msg) for msg in messages], + self.knowledge_cache, + self.generated_reply # 将不合适的回复作为previous_reply传入 + ) + + await self._send_reply() elif action == "fetch_knowledge": self.state = ConversationState.GENERATING @@ -644,17 +690,36 @@ class Conversation: if knowledge != "未找到相关知识": self.knowledge_cache[sources] = knowledge - self.generated_reply, need_replan = await self.reply_generator.generate( + self.generated_reply = await self.reply_generator.generate( self.current_goal, self.current_method, [self._convert_to_message(msg) for msg in messages], self.knowledge_cache ) - if need_replan: - self.state = ConversationState.RETHINKING - self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal() - else: - await self._send_reply() + + # 检查回复是否合适 + is_suitable, reason, need_replan = await self.reply_generator.check_reply( + self.generated_reply, + self.current_goal + ) + + if not is_suitable: + logger.warning(f"生成的回复不合适,原因: {reason}") + if need_replan: + self.state = ConversationState.RETHINKING + self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal() + return + else: + # 重新生成回复 + self.generated_reply = await self.reply_generator.generate( + self.current_goal, + self.current_method, + [self._convert_to_message(msg) for msg in messages], + self.knowledge_cache, + self.generated_reply # 将不合适的回复作为previous_reply传入 + ) + + await self._send_reply() elif action == "rethink_goal": self.state = 
ConversationState.RETHINKING diff --git a/src/plugins/chat/chat_stream.py b/src/plugins/chat/chat_stream.py index 8cddb9376..694e685fa 100644 --- a/src/plugins/chat/chat_stream.py +++ b/src/plugins/chat/chat_stream.py @@ -28,7 +28,7 @@ class ChatStream: self.platform = platform self.user_info = user_info self.group_info = group_info - self.create_time = data.get("create_time", int(time.time())) if data else int(time.time()) + self.create_time = data.get("create_time", time.time()) if data else time.time() self.last_active_time = data.get("last_active_time", self.create_time) if data else self.create_time self.saved = False @@ -60,7 +60,7 @@ class ChatStream: def update_active_time(self): """更新最后活跃时间""" - self.last_active_time = int(time.time()) + self.last_active_time = time.time() self.saved = False diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py index 22487831f..f3369d7bb 100644 --- a/src/plugins/chat/message.py +++ b/src/plugins/chat/message.py @@ -168,7 +168,7 @@ class MessageProcessBase(Message): # 调用父类初始化 super().__init__( message_id=message_id, - time=int(time.time()), + time=round(time.time(), 3), # 保留3位小数 chat_stream=chat_stream, user_info=bot_user_info, message_segment=message_segment, From 02643d729e1ad5d35e32247e4ab2a690f698f376 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Sat, 5 Apr 2025 01:00:05 +0800 Subject: [PATCH 18/51] =?UTF-8?q?=E5=90=88=E5=B9=B6=E6=9D=A1=E4=BB=B6?= =?UTF-8?q?=E6=9B=B4=E4=B8=A5=E6=A0=BC=EF=BC=8C=E5=A2=9E=E5=8A=A0=E9=85=8D?= =?UTF-8?q?=E7=BD=AE=E9=80=89=E9=A1=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 17 ++++++++++++++--- src/plugins/config/config.py | 3 +++ src/plugins/person_info/person_info.py | 4 ++-- template/bot_config_template.toml | 3 ++- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index e139a122b..ccfcf81cc 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -9,6 +9,7 @@ from typing import Dict from collections import OrderedDict import random import time +from ..config.config import global_config logger = get_module_logger("message_buffer") @@ -32,6 +33,11 @@ class MessageBuffer: async def start_caching_messages(self, message:MessageRecv): """添加消息,启动缓冲""" + if not global_config.message_buffer: + person_id = person_info_manager.get_person_id(message.message_info.user_info.platform, + message.message_info.user_info.user_id) + asyncio.create_task(self.save_message_interval(person_id, message.message_info)) + return person_id_ = self.get_person_id_(message.message_info.platform, message.message_info.user_info.user_id, message.message_info.group_info.group_id) @@ -98,6 +104,8 @@ class MessageBuffer: async def query_buffer_result(self, message:MessageRecv) -> bool: """查询缓冲结果,并清理""" + if not global_config.message_buffer: + return True person_id_ = self.get_person_id_(message.message_info.platform, message.message_info.user_info.user_id, message.message_info.group_info.group_id) @@ -122,6 +130,7 @@ class MessageBuffer: combined_text = [] found = False type = "text" + is_update = True for msg_id, msg in self.buffer_pool[person_id_].items(): if msg_id == message.message_info.message_id: found = True @@ -133,14 +142,16 @@ class MessageBuffer: elif msg.result == "F": # 收集F消息的文本内容 if (hasattr(msg.message, 'processed_plain_text') - and msg.message.message_segment.type == "text" and 
msg.message.processed_plain_text): - combined_text.append(msg.message.processed_plain_text) + if msg.message.message_segment.type == "text": + combined_text.append(msg.message.processed_plain_text) + elif msg.message.message_segment.type != "text": + is_update = False elif msg.result == "U": logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}") # 更新当前消息的processed_plain_text - if combined_text and combined_text[0] != message.processed_plain_text: + if combined_text and combined_text[0] != message.processed_plain_text and is_update: if type == "text": message.processed_plain_text = "".join(combined_text) logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py index 2422b0d1f..bf06b8947 100644 --- a/src/plugins/config/config.py +++ b/src/plugins/config/config.py @@ -159,6 +159,7 @@ class BotConfig: emoji_chance: float = 0.2 # 发送表情包的基础概率 thinking_timeout: int = 120 # 思考时间 max_response_length: int = 1024 # 最大回复长度 + message_buffer: bool = True # 消息缓冲器 ban_words = set() ban_msgs_regex = set() @@ -502,6 +503,8 @@ class BotConfig: if config.INNER_VERSION in SpecifierSet(">=0.0.11"): config.max_response_length = msg_config.get("max_response_length", config.max_response_length) + if config.INNER_VERSION in SpecifierSet(">=1.1.4"): + config.message_buffer = msg_config.get("message_buffer", config.message_buffer) def memory(parent: dict): memory_config = parent["memory"] diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 9bb5408e4..543fdb3ee 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -242,7 +242,7 @@ class PersonInfoManager: time_interval = [] for t1, t2 in zip(msg_interval_list_, msg_interval_list_[1:]): delta = t2 - t1 - if delta < 6000 and delta > 0: # 小于6秒 + if delta < 8000 and delta > 0: # 小于8秒 time_interval.append(delta) if len(time_interval) > 30: @@ -263,7 +263,7 @@ class PersonInfoManager: time_series.plot(kind='kde', color='mediumpurple', linewidth=1, label='Density') plt.grid(True, alpha=0.2) - plt.xlim(0, 6000) + plt.xlim(0, 8000) plt.title(f"Message Interval Distribution (User: {person_id[:8]}...)") plt.xlabel("Interval (ms)") plt.ylabel("Density") diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 7df6a6e8e..97a06b700 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "1.1.3" +version = "1.1.4" #以下是给开发人员阅读的,一般用户不需要阅读 @@ -72,6 +72,7 @@ max_context_size = 12 # 麦麦获得的上文数量,建议12,太短太长都 emoji_chance = 0.2 # 麦麦使用表情包的概率 thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃 max_response_length = 256 # 麦麦回答的最大token数 +message_buffer = true # 启用消息缓冲器? 
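The message_buffer switch above toggles the MessageBuffer debounce introduced in this patch series: an incoming message waits in a per-user buffer for roughly three seconds; a newer message from the same user marks the older one as superseded ("F") and later absorbs its text, so only the final message gets processed. A toy asyncio sketch of that behaviour (an illustrative simplification, not the project's MessageBuffer class):

    import asyncio

    async def debounce_demo() -> None:
        # Toy model: per user, only the newest message survives a 3-second
        # quiet window; text of superseded messages is merged into it.
        latest: dict[str, str] = {}

        async def submit(user: str, text: str) -> None:
            merged = latest.get(user, "") + text
            latest[user] = merged
            await asyncio.sleep(3)              # the 3-second buffer window
            if latest.get(user) == merged:      # nothing newer arrived -> process
                print("process:", merged)
                latest.pop(user, None)
            # else: this message was superseded (the "F" case) and its text
            # already lives on in the newer entry.

        async def delayed(delay: float, user: str, text: str) -> None:
            await asyncio.sleep(delay)
            await submit(user, text)

        # Two quick messages from one user are processed once, merged:
        await asyncio.gather(submit("u1", "你好"), delayed(1.0, "u1", ",在吗"))

    # asyncio.run(debounce_demo())  # -> process: 你好,在吗

Setting message_buffer = false falls back to the direct path in start_caching_messages(), trading the merge behaviour for lower reply latency.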
ban_words = [ # "403","张三" ] From aadd494151b1789bcbd48db98c35aa3548ea15e8 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Sat, 5 Apr 2025 01:54:31 +0800 Subject: [PATCH 19/51] =?UTF-8?q?=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index ccfcf81cc..e5f26e53e 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -46,6 +46,13 @@ class MessageBuffer: if person_id_ not in self.buffer_pool: self.buffer_pool[person_id_] = OrderedDict() + # 标记该用户之前的未处理消息 + for cache_msg in self.buffer_pool[person_id_].values(): + if cache_msg.result == "U": + cache_msg.result = "F" + cache_msg.cache_determination.set() + logger.debug(f"被新消息覆盖信息id: {cache_msg.message.message_info.message_id}") + # 查找最近的处理成功消息(T) recent_F_count = 0 for msg_id in reversed(self.buffer_pool[person_id_]): @@ -62,13 +69,6 @@ class MessageBuffer: self.buffer_pool[person_id_][message.message_info.message_id] = new_msg logger.debug(f"快速处理消息(已堆积{recent_F_count}条F): {message.message_info.message_id}") return - - # 标记该用户之前的未处理消息 - for cache_msg in self.buffer_pool[person_id_].values(): - if cache_msg.result == "U": - cache_msg.result = "F" - cache_msg.cache_determination.set() - logger.debug(f"被新消息覆盖信息id: {cache_msg.message.message_info.message_id}") # 添加新消息 self.buffer_pool[person_id_][message.message_info.message_id] = CacheMessages(message=message) @@ -93,7 +93,7 @@ class MessageBuffer: async with self.lock: if (person_id_ not in self.buffer_pool or message_id not in self.buffer_pool[person_id_]): - logger.debug(f"消息异常被清理,msgid: {message_id}") + logger.debug(f"消息已被清理,msgid: {message_id}") return cache_msg = self.buffer_pool[person_id_][message_id] From 81acf7afd7d9ba3efb92f86121a51b6869b42c04 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Sat, 5 Apr 2025 02:09:10 +0800 Subject: [PATCH 20/51] =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E5=B0=8Fbug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index e5f26e53e..1d0291a1c 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -155,11 +155,11 @@ class MessageBuffer: if type == "text": message.processed_plain_text = "".join(combined_text) logger.debug(f"整合了{len(combined_text)-1}条F消息的内容到当前消息") - elif type == "image": + elif type == "emoji": combined_text.pop() message.processed_plain_text = "".join(combined_text) message.is_emoji = False - logger.debug(f"整合了{len(combined_text)-1}条F消息的内容,覆盖当前image消息") + logger.debug(f"整合了{len(combined_text)-1}条F消息的内容,覆盖当前emoji消息") self.buffer_pool[person_id_] = keep_msgs return result From 1dc3f24cfe72ffc375b4fcd75e4e2c25846b8a1c Mon Sep 17 00:00:00 2001 From: UnCLAS-Prommer Date: Sat, 5 Apr 2025 11:59:42 +0800 Subject: [PATCH 21/51] =?UTF-8?q?=E5=A2=9E=E5=8A=A0exception=E6=A3=80?= =?UTF-8?q?=E6=9F=A5=E4=B8=8Efallback?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_sender.py | 18 ++++++++++++------ src/plugins/message/api.py | 5 ++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git 
a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py index 5b4adc8d1..e86efb64e 100644 --- a/src/plugins/chat/message_sender.py +++ b/src/plugins/chat/message_sender.py @@ -43,6 +43,12 @@ class Message_Sender: # 按thinking_start_time排序,时间早的在前面 return recalled_messages + async def send_via_ws(self, message: MessageSending) -> None: + try: + await global_api.send_message(message) + except Exception as e: + raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置,请检查配置文件") from e + async def send_message( self, message: MessageSending, @@ -69,14 +75,14 @@ class Message_Sender: if end_point: # logger.info(f"发送消息到{end_point}") # logger.info(message_json) - await global_api.send_message_REST(end_point, message_json) - else: try: - await global_api.send_message(message) + await global_api.send_message_REST(end_point, message_json) except Exception as e: - raise ValueError( - f"未找到平台:{message.message_info.platform} 的url配置,请检查配置文件" - ) from e + logger.error(f"REST方式发送失败,出现错误: {str(e)}") + logger.info("尝试使用ws发送") + await self.send_via_ws(message) + else: + await self.send_via_ws(message) logger.success(f"发送消息“{message_preview}”成功") except Exception as e: logger.error(f"发送消息“{message_preview}”失败: {str(e)}") diff --git a/src/plugins/message/api.py b/src/plugins/message/api.py index 855ff8b95..2a6a2b6fc 100644 --- a/src/plugins/message/api.py +++ b/src/plugins/message/api.py @@ -215,9 +215,8 @@ class MessageServer(BaseMessageHandler): try: async with session.post(url, json=data, headers={"Content-Type": "application/json"}) as response: return await response.json() - except Exception: - # logger.error(f"发送消息失败: {str(e)}") - pass + except Exception as e: + raise e class BaseMessageAPI: From bd753acd2c2511a2fa3c1589177d3de7f113eca3 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 12:28:43 +0800 Subject: [PATCH 22/51] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E8=A1=A8?= =?UTF-8?q?=E6=83=85=E5=8C=85=E6=89=93=E5=AD=97=E6=97=B6=E9=97=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog_dev.md | 4 ++++ src/plugins/chat/message_sender.py | 2 +- src/plugins/chat/utils.py | 7 ++++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/changelogs/changelog_dev.md b/changelogs/changelog_dev.md index acfb7e03f..82190eb0e 100644 --- a/changelogs/changelog_dev.md +++ b/changelogs/changelog_dev.md @@ -1,4 +1,8 @@ 这里放置了测试版本的细节更新 + +## [test-0.6.1-snapshot-1] - 2025-4-5 +- 修复pfc回复出错bug + ## [test-0.6.0-snapshot-9] - 2025-4-4 - 可以识别gif表情包 diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py index e86efb64e..9621819cc 100644 --- a/src/plugins/chat/message_sender.py +++ b/src/plugins/chat/message_sender.py @@ -64,7 +64,7 @@ class Message_Sender: logger.warning(f"消息“{message.processed_plain_text}”已被撤回,不发送") break if not is_recalled: - typing_time = calculate_typing_time(message.processed_plain_text) + typing_time = calculate_typing_time(message.processed_plain_text,message.is_emoji) await asyncio.sleep(typing_time) message_json = message.to_dict() diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py index 9646fe73b..028335640 100644 --- a/src/plugins/chat/utils.py +++ b/src/plugins/chat/utils.py @@ -334,16 +334,18 @@ def process_llm_response(text: str) -> List[str]: return sentences -def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1) -> float: +def calculate_typing_time(input_string: str, 
chinese_time: float = 0.2, english_time: float = 0.1, is_emoji: bool = False) -> float: """ 计算输入字符串所需的时间,中文和英文字符有不同的输入时间 input_string (str): 输入的字符串 chinese_time (float): 中文字符的输入时间,默认为0.2秒 english_time (float): 英文字符的输入时间,默认为0.1秒 + is_emoji (bool): 是否为emoji,默认为False 特殊情况: - 如果只有一个中文字符,将使用3倍的中文输入时间 - 在所有输入结束后,额外加上回车时间0.3秒 + - 如果is_emoji为True,将使用固定1秒的输入时间 """ # 如果输入是列表,将其连接成字符串 @@ -376,6 +378,9 @@ def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_ else: # 其他字符(如英文) total_time += english_time + if is_emoji: + total_time = 0.7 + return total_time + 0.3 # 加上回车时间 From e4445ee5dbaf1334f30e795fbd8aa4796290f2ae Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 12:39:47 +0800 Subject: [PATCH 23/51] Update changelog_dev.md --- changelogs/changelog_dev.md | 1 + 1 file changed, 1 insertion(+) diff --git a/changelogs/changelog_dev.md b/changelogs/changelog_dev.md index 82190eb0e..3a5f9740f 100644 --- a/changelogs/changelog_dev.md +++ b/changelogs/changelog_dev.md @@ -2,6 +2,7 @@ ## [test-0.6.1-snapshot-1] - 2025-4-5 - 修复pfc回复出错bug +- 修复表情包打字时间,不会卡表情包 ## [test-0.6.0-snapshot-9] - 2025-4-4 - 可以识别gif表情包 From df015ff663555cf9f68f1dd6e33f068d57c44e93 Mon Sep 17 00:00:00 2001 From: Cookie987 Date: Sat, 5 Apr 2025 13:44:01 +0800 Subject: [PATCH 24/51] =?UTF-8?q?fix:=20=E6=94=AF=E6=8C=81dev=E5=88=86?= =?UTF-8?q?=E6=94=AF=EF=BC=8C=E4=BF=AE=E6=94=B9=E9=83=A8=E5=88=86=E6=8F=90?= =?UTF-8?q?=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/run.sh | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/scripts/run.sh b/scripts/run.sh index c1fe4973f..342a23feb 100644 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -4,7 +4,7 @@ # 适用于Arch/Ubuntu 24.10/Debian 12/CentOS 9 # 请小心使用任何一键脚本! -INSTALLER_VERSION="0.0.1-refactor" +INSTALLER_VERSION="0.0.2-refactor" LANG=C.UTF-8 # 如无法访问GitHub请修改此处镜像地址 @@ -62,7 +62,7 @@ show_menu() { "4" "启动Nonebot adapter" \ "5" "停止Nonebot adapter" \ "6" "重启Nonebot adapter" \ - "7" "更新MaiCore及其依赖" \ + "7" "拉取最新MaiCore仓库" \ "8" "切换分支" \ "9" "退出" 3>&1 1>&2 2>&3) @@ -111,6 +111,8 @@ show_menu() { # 更新依赖 update_dependencies() { + whiptail --title "⚠" --msgbox "更新后请阅读教程" 10 60 + systemctl stop ${SERVICE_NAME} cd "${INSTALL_DIR}/MaiBot" || { whiptail --msgbox "🚫 无法进入安装目录!" 10 60 return 1 @@ -126,8 +128,7 @@ update_dependencies() { return 1 fi deactivate - systemctl restart ${SERVICE_NAME} - whiptail --msgbox "✅ 依赖已更新并重启服务!" 10 60 + whiptail --msgbox "✅ 已停止服务并拉取最新仓库提交" 10 60 } # 切换分支 @@ -157,7 +158,7 @@ switch_branch() { whiptail --msgbox "🚫 代码拉取失败!" 10 60 return 1 fi - + systemctl stop ${SERVICE_NAME} source "${INSTALL_DIR}/venv/bin/activate" pip install -r requirements.txt deactivate @@ -165,8 +166,7 @@ switch_branch() { sed -i "s/^BRANCH=.*/BRANCH=${new_branch}/" /etc/maicore_install.conf BRANCH="${new_branch}" check_eula - systemctl restart ${SERVICE_NAME} - whiptail --msgbox "✅ 已切换到分支 ${new_branch} 并重启服务!" 10 60 + whiptail --msgbox "✅ 已停止服务并切换到分支 ${new_branch} !" 10 60 } check_eula() { @@ -228,6 +228,8 @@ run_installation() { fi fi + whiptail --title "ℹ️ 提示" --msgbox "如果您没有特殊需求,请优先使用docker方式部署。" 10 60 + # 协议确认 if ! (whiptail --title "ℹ️ [1/6] 使用协议" --yes-button "我同意" --no-button "我拒绝" --yesno "使用MaiCore及此脚本前请先阅读EULA协议及隐私协议\nhttps://github.com/MaiM-with-u/MaiBot/blob/refactor/EULA.md\nhttps://github.com/MaiM-with-u/MaiBot/blob/refactor/PRIVACY.md\n\n您是否同意上述协议?" 
12 70); then exit 1 @@ -370,12 +372,13 @@ run_installation() { # 选择分支 choose_branch() { BRANCH=$(whiptail --title "🔀 选择分支" --radiolist "请选择要安装的分支:" 15 60 4 \ - "main" "稳定最新版(推荐)" ON \ - "classical" "经典版" OFF \ + "main" "稳定版本(推荐)" ON \ + "dev" "开发版(不知道什么意思就别选)" OFF \ + "classical" "经典版(0.6.0以前的版本)" OFF \ "custom" "自定义分支" OFF 3>&1 1>&2 2>&3) RETVAL=$? if [ $RETVAL -ne 0 ]; then - whiptail --msgbox "操作取消!" 10 60 + whiptail --msgbox "🚫 操作取消!" 10 60 exit 1 fi @@ -383,7 +386,7 @@ run_installation() { BRANCH=$(whiptail --title "🔀 自定义分支" --inputbox "请输入自定义分支名称:" 10 60 "refactor" 3>&1 1>&2 2>&3) RETVAL=$? if [ $RETVAL -ne 0 ]; then - whiptail --msgbox "输入取消!" 10 60 + whiptail --msgbox "🚫 输入取消!" 10 60 exit 1 fi if [[ -z "$BRANCH" ]]; then From 3f570a01dd4759f8e3a8bf6ac7ddd938d1cacdd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=A6=E6=BA=AA=E7=95=94?= <130263765+na10xi27da@users.noreply.github.com> Date: Sat, 5 Apr 2025 13:56:53 +0800 Subject: [PATCH 25/51] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E9=A1=B9=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- template/bot_config_template.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 97a06b700..bcb62ddc6 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -72,7 +72,7 @@ max_context_size = 12 # 麦麦获得的上文数量,建议12,太短太长都 emoji_chance = 0.2 # 麦麦使用表情包的概率 thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃 max_response_length = 256 # 麦麦回答的最大token数 -message_buffer = true # 启用消息缓冲器? +message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟 ban_words = [ # "403","张三" ] @@ -237,4 +237,4 @@ pri_out = 1.26 name = "Qwen/Qwen2.5-32B-Instruct" provider = "SILICONFLOW" pri_in = 1.26 -pri_out = 1.26 \ No newline at end of file +pri_out = 1.26 From cf26961421b2c18906115ca54c41f4e4d2cf2f8c Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 14:06:46 +0800 Subject: [PATCH 26/51] test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit test改动 --- template/bot_config_template.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 7df6a6e8e..1906b00e5 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -162,7 +162,7 @@ pfc_chatting = false # 是否启用PFC聊天 # stream = : 用于指定模型是否是使用流式输出 # 如果不指定,则该项是 False -[model.llm_reasoning] #暂时未使用 +[model.llm_reasoning] #只在回复模式为reasoning时启用 name = "Pro/deepseek-ai/DeepSeek-R1" # name = "Qwen/QwQ-32B" provider = "SILICONFLOW" From db14d9c39bca718b836ac5243bf3d06510ee73f4 Mon Sep 17 00:00:00 2001 From: Voyager1 <2496196079@qq.com> Date: Sat, 5 Apr 2025 17:31:34 +0800 Subject: [PATCH 27/51] =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BA=86=E7=8E=B0?= =?UTF-8?q?=E6=9C=89=E7=9A=84=E7=9F=A5=E8=AF=86=E5=BA=93=E7=B3=BB=E7=BB=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../reasoning_prompt_builder.py | 178 ++++++++++++++++-- src/plugins/zhishi/knowledge_library.py | 68 ++----- 2 files changed, 173 insertions(+), 73 deletions(-) diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py index e3015fe1e..d9e2cf75b 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py +++ 
b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py @@ -1,16 +1,19 @@ import random import time -from typing import Optional +from typing import Optional, Union +import re +import jieba +import numpy as np from ....common.database import db -from ...memory_system.Hippocampus import HippocampusManager -from ...moods.moods import MoodManager -from ...schedule.schedule_generator import bot_schedule -from ...config.config import global_config from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker from ...chat.chat_stream import chat_manager -from src.common.logger import get_module_logger +from ...moods.moods import MoodManager +from ...memory_system.Hippocampus import HippocampusManager +from ...schedule.schedule_generator import bot_schedule +from ...config.config import global_config from ...person_info.relationship_manager import relationship_manager +from src.common.logger import get_module_logger logger = get_module_logger("prompt") @@ -128,7 +131,7 @@ class PromptBuilder: # 知识构建 start_time = time.time() prompt_info = "" - prompt_info = await self.get_prompt_info(message_txt, threshold=0.5) + prompt_info = await self.get_prompt_info(message_txt, threshold=0.38) if prompt_info: prompt_info = f"""\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n""" @@ -158,16 +161,156 @@ class PromptBuilder: return prompt async def get_prompt_info(self, message: str, threshold: float): + start_time = time.time() related_info = "" logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}") - embedding = await get_embedding(message, request_type="prompt_build") - related_info += self.get_info_from_db(embedding, limit=1, threshold=threshold) - + + # 1. 先从LLM获取主题,类似于记忆系统的做法 + topics = [] + try: + # 先尝试使用记忆系统的方法获取主题 + hippocampus = HippocampusManager.get_instance()._hippocampus + topic_num = min(5, max(1, int(len(message) * 0.1))) + topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num)) + + # 提取关键词 + topics = re.findall(r"<([^>]+)>", topics_response[0]) + if not topics: + topics = [] + else: + topics = [ + topic.strip() + for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",") + if topic.strip() + ] + + logger.info(f"从LLM提取的主题: {', '.join(topics)}") + except Exception as e: + logger.error(f"从LLM提取主题失败: {str(e)}") + # 如果LLM提取失败,使用jieba分词提取关键词作为备选 + words = jieba.cut(message) + topics = [word for word in words if len(word) > 1][:5] + logger.info(f"使用jieba提取的主题: {', '.join(topics)}") + + # 如果无法提取到主题,直接使用整个消息 + if not topics: + logger.info("未能提取到任何主题,使用整个消息进行查询") + embedding = await get_embedding(message, request_type="prompt_build") + if not embedding: + logger.error("获取消息嵌入向量失败") + return "" + + related_info = self.get_info_from_db(embedding, limit=3, threshold=threshold) + logger.info(f"知识库检索完成,总耗时: {time.time() - start_time:.3f}秒") + return related_info + + # 2. 
对每个主题进行知识库查询 + logger.info(f"开始处理{len(topics)}个主题的知识库查询") + + # 优化:批量获取嵌入向量,减少API调用 + embeddings = {} + topics_batch = [topic for topic in topics if len(topic) > 0] + if message: # 确保消息非空 + topics_batch.append(message) + + # 批量获取嵌入向量 + embed_start_time = time.time() + for text in topics_batch: + if not text or len(text.strip()) == 0: + continue + + try: + embedding = await get_embedding(text, request_type="prompt_build") + if embedding: + embeddings[text] = embedding + else: + logger.warning(f"获取'{text}'的嵌入向量失败") + except Exception as e: + logger.error(f"获取'{text}'的嵌入向量时发生错误: {str(e)}") + + logger.info(f"批量获取嵌入向量完成,耗时: {time.time() - embed_start_time:.3f}秒") + + if not embeddings: + logger.error("所有嵌入向量获取失败") + return "" + + # 3. 对每个主题进行知识库查询 + all_results = [] + query_start_time = time.time() + + # 首先添加原始消息的查询结果 + if message in embeddings: + original_results = self.get_info_from_db(embeddings[message], limit=3, threshold=threshold, return_raw=True) + if original_results: + for result in original_results: + result["topic"] = "原始消息" + all_results.extend(original_results) + logger.info(f"原始消息查询到{len(original_results)}条结果") + + # 然后添加每个主题的查询结果 + for topic in topics: + if not topic or topic not in embeddings: + continue + + try: + topic_results = self.get_info_from_db(embeddings[topic], limit=3, threshold=threshold, return_raw=True) + if topic_results: + # 添加主题标记 + for result in topic_results: + result["topic"] = topic + all_results.extend(topic_results) + logger.info(f"主题'{topic}'查询到{len(topic_results)}条结果") + except Exception as e: + logger.error(f"查询主题'{topic}'时发生错误: {str(e)}") + + logger.info(f"知识库查询完成,耗时: {time.time() - query_start_time:.3f}秒,共获取{len(all_results)}条结果") + + # 4. 去重和过滤 + process_start_time = time.time() + unique_contents = set() + filtered_results = [] + for result in all_results: + content = result["content"] + if content not in unique_contents: + unique_contents.add(content) + filtered_results.append(result) + + # 5. 按相似度排序 + filtered_results.sort(key=lambda x: x["similarity"], reverse=True) + + # 6. 限制总数量(最多10条) + filtered_results = filtered_results[:10] + logger.info(f"结果处理完成,耗时: {time.time() - process_start_time:.3f}秒,过滤后剩余{len(filtered_results)}条结果") + + # 7. 格式化输出 + if filtered_results: + format_start_time = time.time() + grouped_results = {} + for result in filtered_results: + topic = result["topic"] + if topic not in grouped_results: + grouped_results[topic] = [] + grouped_results[topic].append(result) + + # 按主题组织输出 + for topic, results in grouped_results.items(): + related_info += f"【主题: {topic}】\n" + for i, result in enumerate(results, 1): + similarity = result["similarity"] + content = result["content"].strip() + # 调试:为内容添加序号和相似度信息 + # related_info += f"{i}. 
[{similarity:.2f}] {content}\n" + related_info += f"{content}\n" + related_info += "\n" + + logger.info(f"格式化输出完成,耗时: {time.time() - format_start_time:.3f}秒") + + logger.info(f"知识库检索总耗时: {time.time() - start_time:.3f}秒") return related_info - def get_info_from_db(self, query_embedding: list, limit: int = 1, threshold: float = 0.5) -> str: + def get_info_from_db(self, query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False) -> Union[str, list]: if not query_embedding: - return "" + return "" if not return_raw else [] # 使用余弦相似度计算 pipeline = [ { @@ -221,13 +364,16 @@ class PromptBuilder: ] results = list(db.knowledges.aggregate(pipeline)) - # print(f"\033[1;34m[调试]\033[0m获取知识库内容结果: {results}") + logger.debug(f"知识库查询结果数量: {len(results)}") if not results: - return "" + return "" if not return_raw else [] - # 返回所有找到的内容,用换行分隔 - return "\n".join(str(result["content"]) for result in results) + if return_raw: + return results + else: + # 返回所有找到的内容,用换行分隔 + return "\n".join(str(result["content"]) for result in results) prompt_builder = PromptBuilder() diff --git a/src/plugins/zhishi/knowledge_library.py b/src/plugins/zhishi/knowledge_library.py index a95a096e6..cf38874ce 100644 --- a/src/plugins/zhishi/knowledge_library.py +++ b/src/plugins/zhishi/knowledge_library.py @@ -41,7 +41,7 @@ class KnowledgeLibrary: return f.read() def split_content(self, content: str, max_length: int = 512) -> list: - """将内容分割成适当大小的块,保持段落完整性 + """将内容分割成适当大小的块,按空行分割 Args: content: 要分割的文本内容 @@ -50,67 +50,21 @@ class KnowledgeLibrary: Returns: list: 分割后的文本块列表 """ - # 首先按段落分割 + # 按空行分割内容 paragraphs = [p.strip() for p in content.split("\n\n") if p.strip()] chunks = [] - current_chunk = [] - current_length = 0 - + for para in paragraphs: para_length = len(para) - - # 如果单个段落就超过最大长度 - if para_length > max_length: - # 如果当前chunk不为空,先保存 - if current_chunk: - chunks.append("\n".join(current_chunk)) - current_chunk = [] - current_length = 0 - - # 将长段落按句子分割 - sentences = [ - s.strip() - for s in para.replace("。", "。\n").replace("!", "!\n").replace("?", "?\n").split("\n") - if s.strip() - ] - temp_chunk = [] - temp_length = 0 - - for sentence in sentences: - sentence_length = len(sentence) - if sentence_length > max_length: - # 如果单个句子超长,强制按长度分割 - if temp_chunk: - chunks.append("\n".join(temp_chunk)) - temp_chunk = [] - temp_length = 0 - for i in range(0, len(sentence), max_length): - chunks.append(sentence[i : i + max_length]) - elif temp_length + sentence_length + 1 <= max_length: - temp_chunk.append(sentence) - temp_length += sentence_length + 1 - else: - chunks.append("\n".join(temp_chunk)) - temp_chunk = [sentence] - temp_length = sentence_length - - if temp_chunk: - chunks.append("\n".join(temp_chunk)) - - # 如果当前段落加上现有chunk不超过最大长度 - elif current_length + para_length + 1 <= max_length: - current_chunk.append(para) - current_length += para_length + 1 + + # 如果段落长度小于等于最大长度,直接添加 + if para_length <= max_length: + chunks.append(para) else: - # 保存当前chunk并开始新的chunk - chunks.append("\n".join(current_chunk)) - current_chunk = [para] - current_length = para_length - - # 添加最后一个chunk - if current_chunk: - chunks.append("\n".join(current_chunk)) - + # 如果段落超过最大长度,则按最大长度切分 + for i in range(0, para_length, max_length): + chunks.append(para[i:i + max_length]) + return chunks def get_embedding(self, text: str) -> list: From 028a08703493dbd6d3d9c92bebd9b4d516ea4358 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 18:57:41 +0800 Subject: [PATCH 28/51] 
=?UTF-8?q?fix=EF=BC=9A=E5=BD=BB=E5=BA=95=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E8=A1=A8=E6=83=85=E5=8C=85=E6=89=93=E5=AD=97=E6=97=B6?= =?UTF-8?q?=E9=97=B4=E7=82=B8=E8=A3=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_sender.py | 10 ++++++- src/plugins/chat/utils.py | 26 +++++++++---------- .../think_flow_chat/think_flow_chat.py | 6 +++-- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py index 9621819cc..566fe295e 100644 --- a/src/plugins/chat/message_sender.py +++ b/src/plugins/chat/message_sender.py @@ -64,8 +64,14 @@ class Message_Sender: logger.warning(f"消息“{message.processed_plain_text}”已被撤回,不发送") break if not is_recalled: - typing_time = calculate_typing_time(message.processed_plain_text,message.is_emoji) + # print(message.processed_plain_text + str(message.is_emoji)) + typing_time = calculate_typing_time( + input_string=message.processed_plain_text, + thinking_start_time=message.thinking_start_time, + is_emoji=message.is_emoji) + logger.debug(f"{message.processed_plain_text},{typing_time},计算输入时间结束") await asyncio.sleep(typing_time) + logger.debug(f"{message.processed_plain_text},{typing_time},等待输入时间结束") message_json = message.to_dict() @@ -220,6 +226,8 @@ class MessageManager: await message_earliest.process() + # print(f"message_earliest.thinking_start_tim22222e:{message_earliest.thinking_start_time}") + await message_sender.send_message(message_earliest) await self.storage.store_message(message_earliest, message_earliest.chat_stream) diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py index 028335640..26bd3a171 100644 --- a/src/plugins/chat/utils.py +++ b/src/plugins/chat/utils.py @@ -334,7 +334,7 @@ def process_llm_response(text: str) -> List[str]: return sentences -def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1, is_emoji: bool = False) -> float: +def calculate_typing_time(input_string: str, thinking_start_time: float, chinese_time: float = 0.2, english_time: float = 0.1, is_emoji: bool = False) -> float: """ 计算输入字符串所需的时间,中文和英文字符有不同的输入时间 input_string (str): 输入的字符串 @@ -347,15 +347,6 @@ def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_ - 在所有输入结束后,额外加上回车时间0.3秒 - 如果is_emoji为True,将使用固定1秒的输入时间 """ - - # 如果输入是列表,将其连接成字符串 - if isinstance(input_string, list): - input_string = ''.join(input_string) - - # 确保现在是字符串类型 - if not isinstance(input_string, str): - input_string = str(input_string) - mood_manager = MoodManager.get_instance() # 将0-1的唤醒度映射到-1到1 mood_arousal = mood_manager.current_mood.arousal @@ -378,10 +369,19 @@ def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_ else: # 其他字符(如英文) total_time += english_time - if is_emoji: - total_time = 0.7 - return total_time + 0.3 # 加上回车时间 + if is_emoji: + total_time = 1 + + if time.time() - thinking_start_time > 10: + total_time = 1 + + # print(f"thinking_start_time:{thinking_start_time}") + # print(f"nowtime:{time.time()}") + # print(f"nowtime - thinking_start_time:{time.time() - thinking_start_time}") + # print(f"{total_time}") + + return total_time # 加上回车时间 def cosine_similarity(v1, v2): diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index c5ab77b6d..5bb11b53a 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ 
b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ -95,6 +95,8 @@ class ThinkFlowChat: ) if not mark_head: mark_head = True + + # print(f"thinking_start_time:{bot_message.thinking_start_time}") message_set.add_message(bot_message) message_manager.add_message(message_set) @@ -272,11 +274,11 @@ class ThinkFlowChat: timer2 = time.time() timing_results["发送消息"] = timer2 - timer1 - # 处理表情包 + # 发送表情包 timer1 = time.time() await self._handle_emoji(message, chat, response_set) timer2 = time.time() - timing_results["处理表情包"] = timer2 - timer1 + timing_results["发送表情包"] = timer2 - timer1 # 更新心流 timer1 = time.time() From 6c08fed7017e72cbd7f96647dfd69045946463b4 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 19:02:29 +0800 Subject: [PATCH 29/51] =?UTF-8?q?fix=EF=BC=9A=E8=A7=A3=E5=86=B3ban=5Fuser?= =?UTF-8?q?=E5=A4=B1=E6=95=88=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/bot.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py index cfdfbdb32..32308bfa9 100644 --- a/src/plugins/chat/bot.py +++ b/src/plugins/chat/bot.py @@ -91,8 +91,13 @@ class ChatBot: try: message = MessageRecv(message_data) groupinfo = message.message_info.group_info - logger.debug(f"处理消息:{str(message_data)[:50]}...") + userinfo = message.message_info.user_info + logger.debug(f"处理消息:{str(message_data)[:80]}...") + if userinfo.user_id in global_config.ban_user_id: + logger.debug(f"用户{userinfo.user_id}被禁止回复") + return + if global_config.enable_pfc_chatting: try: if groupinfo is None and global_config.enable_friend_chat: From 94e1c89dcb1e0c566d2e6b38c288123d5b61485e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 19:10:51 +0800 Subject: [PATCH 30/51] =?UTF-8?q?fix=20=E5=A2=9E=E5=8A=A0MongoDB=20SRV?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E6=95=B0=E6=8D=AE=E5=BA=93URI=E6=94=AF?= =?UTF-8?q?=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/database.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/common/database.py b/src/common/database.py index a3e5b4e3b..ee0ead0bd 100644 --- a/src/common/database.py +++ b/src/common/database.py @@ -15,9 +15,16 @@ def __create_database_instance(): password = os.getenv("MONGODB_PASSWORD") auth_source = os.getenv("MONGODB_AUTH_SOURCE") - if uri and uri.startswith("mongodb://"): - # 优先使用URI连接 - return MongoClient(uri) + if uri: + # 支持标准mongodb://和mongodb+srv://连接字符串 + if uri.startswith(("mongodb://", "mongodb+srv://")): + return MongoClient(uri) + else: + raise ValueError( + "Invalid MongoDB URI format. URI must start with 'mongodb://' or 'mongodb+srv://'. " + "For MongoDB Atlas, use 'mongodb+srv://' format. 
" + "See: https://www.mongodb.com/docs/manual/reference/connection-string/" + ) if username and password: # 如果有用户名和密码,使用认证连接 From 848f5c53fc5c849ba0e77d45c55cfd2764eace65 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Sat, 5 Apr 2025 20:39:06 +0800 Subject: [PATCH 31/51] =?UTF-8?q?=E8=B0=83=E6=95=B4=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/person_info/person_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 543fdb3ee..0bf15e177 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -276,7 +276,7 @@ class PersonInfoManager: filtered_intervals = [t for t in time_interval if t >= 500] if len(filtered_intervals) > 25: - msg_interval = int(round(numpy.percentile(filtered_intervals, 90))) + msg_interval = int(round(numpy.percentile(filtered_intervals, 80))) await self.update_one_field(person_id, "msg_interval", msg_interval) logger.debug(f"用户{person_id}的msg_interval已经被更新为{msg_interval}") except Exception as e: From 8cc4e1994753988783f5e935bd534b82c88f3437 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 21:00:54 +0800 Subject: [PATCH 32/51] =?UTF-8?q?feat=EF=BC=9A=E4=B8=BA=E5=BF=83=E6=B5=81?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E7=9F=A5=E8=AF=86=E5=92=8C=E7=9F=A5=E8=AF=86?= =?UTF-8?q?=E7=BC=93=E5=AD=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/heart_flow/sub_heartflow.py | 300 ++++++++++++++++-- .../reasoning_prompt_builder.py | 44 +-- .../think_flow_chat/think_flow_chat.py | 121 ++++--- 3 files changed, 360 insertions(+), 105 deletions(-) diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py index fcbe9332f..1312b7aae 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/heart_flow/sub_heartflow.py @@ -8,6 +8,9 @@ import time from src.plugins.schedule.schedule_generator import bot_schedule from src.plugins.memory_system.Hippocampus import HippocampusManager from src.common.logger import get_module_logger, LogConfig, SUB_HEARTFLOW_STYLE_CONFIG # noqa: E402 +from src.plugins.chat.utils import get_embedding +from src.common.database import db +from typing import Union subheartflow_config = LogConfig( # 使用海马体专用样式 @@ -53,6 +56,8 @@ class SubHeartflow: self.is_active = False self.observations: list[Observation] = [] + + self.running_knowledges = [] def add_observation(self, observation: Observation): """添加一个新的observation对象到列表中,如果已存在相同id的observation则不添加""" @@ -98,49 +103,49 @@ class SubHeartflow: logger.info(f"子心流 {self.subheartflow_id} 已经5分钟没有激活,正在销毁...") break # 退出循环,销毁自己 - async def do_a_thinking(self): - current_thinking_info = self.current_mind - mood_info = self.current_state.mood + # async def do_a_thinking(self): + # current_thinking_info = self.current_mind + # mood_info = self.current_state.mood - observation = self.observations[0] - chat_observe_info = observation.observe_info - # print(f"chat_observe_info:{chat_observe_info}") + # observation = self.observations[0] + # chat_observe_info = observation.observe_info + # # print(f"chat_observe_info:{chat_observe_info}") - # 调取记忆 - related_memory = await HippocampusManager.get_instance().get_memory_from_text( - text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False - ) + # # 调取记忆 + # related_memory = await 
HippocampusManager.get_instance().get_memory_from_text( + # text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False + # ) - if related_memory: - related_memory_info = "" - for memory in related_memory: - related_memory_info += memory[1] - else: - related_memory_info = "" + # if related_memory: + # related_memory_info = "" + # for memory in related_memory: + # related_memory_info += memory[1] + # else: + # related_memory_info = "" - # print(f"相关记忆:{related_memory_info}") + # # print(f"相关记忆:{related_memory_info}") - schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False) + # schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False) - prompt = "" - prompt += f"你刚刚在做的事情是:{schedule_info}\n" - # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n" - prompt += f"你{self.personality_info}\n" - if related_memory_info: - prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" - prompt += f"刚刚你的想法是{current_thinking_info}。\n" - prompt += "-----------------------------------\n" - prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n" - prompt += f"你现在{mood_info}\n" - prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长," - prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:" - reponse, reasoning_content = await self.llm_model.generate_response_async(prompt) + # prompt = "" + # prompt += f"你刚刚在做的事情是:{schedule_info}\n" + # # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n" + # prompt += f"你{self.personality_info}\n" + # if related_memory_info: + # prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" + # prompt += f"刚刚你的想法是{current_thinking_info}。\n" + # prompt += "-----------------------------------\n" + # prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n" + # prompt += f"你现在{mood_info}\n" + # prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长," + # prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:" + # reponse, reasoning_content = await self.llm_model.generate_response_async(prompt) - self.update_current_mind(reponse) + # self.update_current_mind(reponse) - self.current_mind = reponse - logger.debug(f"prompt:\n{prompt}\n") - logger.info(f"麦麦的脑内状态:{self.current_mind}") + # self.current_mind = reponse + # logger.debug(f"prompt:\n{prompt}\n") + # logger.info(f"麦麦的脑内状态:{self.current_mind}") async def do_observe(self): observation = self.observations[0] @@ -166,6 +171,13 @@ class SubHeartflow: else: related_memory_info = "" + related_info,grouped_results = await self.get_prompt_info(chat_observe_info + message_txt, 0.4) + print(related_info) + for topic, results in grouped_results.items(): + for result in results: + print(result) + self.running_knowledges.append(result) + # print(f"相关记忆:{related_memory_info}") schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False) @@ -176,6 +188,8 @@ class SubHeartflow: prompt += f"你刚刚在做的事情是:{schedule_info}\n" if related_memory_info: prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" + if related_info: + prompt += f"你想起你知道:{related_info}\n" prompt += f"刚刚你的想法是{current_thinking_info}。\n" prompt += "-----------------------------------\n" prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n" @@ -249,6 +263,222 @@ class SubHeartflow: def update_current_mind(self, reponse): self.past_mind.append(self.current_mind) self.current_mind = reponse + + + async def get_prompt_info(self, message: str, threshold: float): + start_time = time.time() 
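+        # 流程说明:当前版本里 LLM 主题提取被整段注释,topics 恒为空,
+        # 因此实际只对整条消息取 embedding,交给下方 get_info_from_db 在
+        # db.knowledges 中按余弦相似度检索,然后直接返回 (related_info, {});
+        # 后面的多主题查询、去重、按相似度排序并截取前 10 条的分支,
+        # 要等重新启用主题提取后才会走到。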
+ related_info = "" + logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}") + + # 1. 先从LLM获取主题,类似于记忆系统的做法 + topics = [] + # try: + # # 先尝试使用记忆系统的方法获取主题 + # hippocampus = HippocampusManager.get_instance()._hippocampus + # topic_num = min(5, max(1, int(len(message) * 0.1))) + # topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num)) + + # # 提取关键词 + # topics = re.findall(r"<([^>]+)>", topics_response[0]) + # if not topics: + # topics = [] + # else: + # topics = [ + # topic.strip() + # for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",") + # if topic.strip() + # ] + + # logger.info(f"从LLM提取的主题: {', '.join(topics)}") + # except Exception as e: + # logger.error(f"从LLM提取主题失败: {str(e)}") + # # 如果LLM提取失败,使用jieba分词提取关键词作为备选 + # words = jieba.cut(message) + # topics = [word for word in words if len(word) > 1][:5] + # logger.info(f"使用jieba提取的主题: {', '.join(topics)}") + + # 如果无法提取到主题,直接使用整个消息 + if not topics: + logger.info("未能提取到任何主题,使用整个消息进行查询") + embedding = await get_embedding(message, request_type="info_retrieval") + if not embedding: + logger.error("获取消息嵌入向量失败") + return "" + + related_info = self.get_info_from_db(embedding, limit=3, threshold=threshold) + logger.info(f"知识库检索完成,总耗时: {time.time() - start_time:.3f}秒") + return related_info, {} + + # 2. 对每个主题进行知识库查询 + logger.info(f"开始处理{len(topics)}个主题的知识库查询") + + # 优化:批量获取嵌入向量,减少API调用 + embeddings = {} + topics_batch = [topic for topic in topics if len(topic) > 0] + if message: # 确保消息非空 + topics_batch.append(message) + + # 批量获取嵌入向量 + embed_start_time = time.time() + for text in topics_batch: + if not text or len(text.strip()) == 0: + continue + + try: + embedding = await get_embedding(text, request_type="info_retrieval") + if embedding: + embeddings[text] = embedding + else: + logger.warning(f"获取'{text}'的嵌入向量失败") + except Exception as e: + logger.error(f"获取'{text}'的嵌入向量时发生错误: {str(e)}") + + logger.info(f"批量获取嵌入向量完成,耗时: {time.time() - embed_start_time:.3f}秒") + + if not embeddings: + logger.error("所有嵌入向量获取失败") + return "" + + # 3. 对每个主题进行知识库查询 + all_results = [] + query_start_time = time.time() + + # 首先添加原始消息的查询结果 + if message in embeddings: + original_results = self.get_info_from_db(embeddings[message], limit=3, threshold=threshold, return_raw=True) + if original_results: + for result in original_results: + result["topic"] = "原始消息" + all_results.extend(original_results) + logger.info(f"原始消息查询到{len(original_results)}条结果") + + # 然后添加每个主题的查询结果 + for topic in topics: + if not topic or topic not in embeddings: + continue + + try: + topic_results = self.get_info_from_db(embeddings[topic], limit=3, threshold=threshold, return_raw=True) + if topic_results: + # 添加主题标记 + for result in topic_results: + result["topic"] = topic + all_results.extend(topic_results) + logger.info(f"主题'{topic}'查询到{len(topic_results)}条结果") + except Exception as e: + logger.error(f"查询主题'{topic}'时发生错误: {str(e)}") + + logger.info(f"知识库查询完成,耗时: {time.time() - query_start_time:.3f}秒,共获取{len(all_results)}条结果") + + # 4. 去重和过滤 + process_start_time = time.time() + unique_contents = set() + filtered_results = [] + for result in all_results: + content = result["content"] + if content not in unique_contents: + unique_contents.add(content) + filtered_results.append(result) + + # 5. 按相似度排序 + filtered_results.sort(key=lambda x: x["similarity"], reverse=True) + + # 6. 
限制总数量(最多10条) + filtered_results = filtered_results[:10] + logger.info(f"结果处理完成,耗时: {time.time() - process_start_time:.3f}秒,过滤后剩余{len(filtered_results)}条结果") + + # 7. 格式化输出 + if filtered_results: + format_start_time = time.time() + grouped_results = {} + for result in filtered_results: + topic = result["topic"] + if topic not in grouped_results: + grouped_results[topic] = [] + grouped_results[topic].append(result) + + # 按主题组织输出 + for topic, results in grouped_results.items(): + related_info += f"【主题: {topic}】\n" + for i, result in enumerate(results, 1): + similarity = result["similarity"] + content = result["content"].strip() + # 调试:为内容添加序号和相似度信息 + # related_info += f"{i}. [{similarity:.2f}] {content}\n" + related_info += f"{content}\n" + related_info += "\n" + + logger.info(f"格式化输出完成,耗时: {time.time() - format_start_time:.3f}秒") + + logger.info(f"知识库检索总耗时: {time.time() - start_time:.3f}秒") + return related_info,grouped_results + + def get_info_from_db(self, query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False) -> Union[str, list]: + if not query_embedding: + return "" if not return_raw else [] + # 使用余弦相似度计算 + pipeline = [ + { + "$addFields": { + "dotProduct": { + "$reduce": { + "input": {"$range": [0, {"$size": "$embedding"}]}, + "initialValue": 0, + "in": { + "$add": [ + "$$value", + { + "$multiply": [ + {"$arrayElemAt": ["$embedding", "$$this"]}, + {"$arrayElemAt": [query_embedding, "$$this"]}, + ] + }, + ] + }, + } + }, + "magnitude1": { + "$sqrt": { + "$reduce": { + "input": "$embedding", + "initialValue": 0, + "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]}, + } + } + }, + "magnitude2": { + "$sqrt": { + "$reduce": { + "input": query_embedding, + "initialValue": 0, + "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]}, + } + } + }, + } + }, + {"$addFields": {"similarity": {"$divide": ["$dotProduct", {"$multiply": ["$magnitude1", "$magnitude2"]}]}}}, + { + "$match": { + "similarity": {"$gte": threshold} # 只保留相似度大于等于阈值的结果 + } + }, + {"$sort": {"similarity": -1}}, + {"$limit": limit}, + {"$project": {"content": 1, "similarity": 1}}, + ] + + results = list(db.knowledges.aggregate(pipeline)) + logger.debug(f"知识库查询结果数量: {len(results)}") + + if not results: + return "" if not return_raw else [] + + if return_raw: + return results + else: + # 返回所有找到的内容,用换行分隔 + return "\n".join(str(result["content"]) for result in results) # subheartflow = SubHeartflow() diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py index d9e2cf75b..87fc14045 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py @@ -167,30 +167,30 @@ class PromptBuilder: # 1. 
先从LLM获取主题,类似于记忆系统的做法 topics = [] - try: - # 先尝试使用记忆系统的方法获取主题 - hippocampus = HippocampusManager.get_instance()._hippocampus - topic_num = min(5, max(1, int(len(message) * 0.1))) - topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num)) + # try: + # # 先尝试使用记忆系统的方法获取主题 + # hippocampus = HippocampusManager.get_instance()._hippocampus + # topic_num = min(5, max(1, int(len(message) * 0.1))) + # topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num)) - # 提取关键词 - topics = re.findall(r"<([^>]+)>", topics_response[0]) - if not topics: - topics = [] - else: - topics = [ - topic.strip() - for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",") - if topic.strip() - ] + # # 提取关键词 + # topics = re.findall(r"<([^>]+)>", topics_response[0]) + # if not topics: + # topics = [] + # else: + # topics = [ + # topic.strip() + # for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",") + # if topic.strip() + # ] - logger.info(f"从LLM提取的主题: {', '.join(topics)}") - except Exception as e: - logger.error(f"从LLM提取主题失败: {str(e)}") - # 如果LLM提取失败,使用jieba分词提取关键词作为备选 - words = jieba.cut(message) - topics = [word for word in words if len(word) > 1][:5] - logger.info(f"使用jieba提取的主题: {', '.join(topics)}") + # logger.info(f"从LLM提取的主题: {', '.join(topics)}") + # except Exception as e: + # logger.error(f"从LLM提取主题失败: {str(e)}") + # # 如果LLM提取失败,使用jieba分词提取关键词作为备选 + # words = jieba.cut(message) + # topics = [word for word in words if len(word) > 1][:5] + # logger.info(f"使用jieba提取的主题: {', '.join(topics)}") # 如果无法提取到主题,直接使用整个消息 if not topics: diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index c5ab77b6d..725fd3f72 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ -236,59 +236,84 @@ class ThinkFlowChat: do_reply = False if random() < reply_probability: - do_reply = True - - # 创建思考消息 - timer1 = time.time() - thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo) - timer2 = time.time() - timing_results["创建思考消息"] = timer2 - timer1 - - # 观察 - timer1 = time.time() - await heartflow.get_subheartflow(chat.stream_id).do_observe() - timer2 = time.time() - timing_results["观察"] = timer2 - timer1 - - # 思考前脑内状态 - timer1 = time.time() - await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(message.processed_plain_text) - timer2 = time.time() - timing_results["思考前脑内状态"] = timer2 - timer1 - - # 生成回复 - timer1 = time.time() - response_set = await self.gpt.generate_response(message) - timer2 = time.time() - timing_results["生成回复"] = timer2 - timer1 + try: + do_reply = True + + # 创建思考消息 + try: + timer1 = time.time() + thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo) + timer2 = time.time() + timing_results["创建思考消息"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流创建思考消息失败: {e}") + + try: + # 观察 + timer1 = time.time() + await heartflow.get_subheartflow(chat.stream_id).do_observe() + timer2 = time.time() + timing_results["观察"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流观察失败: {e}") - if not response_set: - logger.info("为什么生成回复失败?") - return + # 思考前脑内状态 + try: + timer1 = time.time() + await 
heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(message.processed_plain_text) + timer2 = time.time() + timing_results["思考前脑内状态"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流思考前脑内状态失败: {e}") + + # 生成回复 + timer1 = time.time() + response_set = await self.gpt.generate_response(message) + timer2 = time.time() + timing_results["生成回复"] = timer2 - timer1 - # 发送消息 - timer1 = time.time() - await self._send_response_messages(message, chat, response_set, thinking_id) - timer2 = time.time() - timing_results["发送消息"] = timer2 - timer1 + if not response_set: + logger.info("为什么生成回复失败?") + return - # 处理表情包 - timer1 = time.time() - await self._handle_emoji(message, chat, response_set) - timer2 = time.time() - timing_results["处理表情包"] = timer2 - timer1 + # 发送消息 + try: + timer1 = time.time() + await self._send_response_messages(message, chat, response_set, thinking_id) + timer2 = time.time() + timing_results["发送消息"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流发送消息失败: {e}") - # 更新心流 - timer1 = time.time() - await self._update_using_response(message, response_set) - timer2 = time.time() - timing_results["更新心流"] = timer2 - timer1 + # 处理表情包 + try: + timer1 = time.time() + await self._handle_emoji(message, chat, response_set) + timer2 = time.time() + timing_results["处理表情包"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流处理表情包失败: {e}") - # 更新关系情绪 - timer1 = time.time() - await self._update_relationship(message, response_set) - timer2 = time.time() - timing_results["更新关系情绪"] = timer2 - timer1 + # 更新心流 + try: + timer1 = time.time() + await self._update_using_response(message, response_set) + timer2 = time.time() + timing_results["更新心流"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流更新失败: {e}") + + # 更新关系情绪 + try: + timer1 = time.time() + await self._update_relationship(message, response_set) + timer2 = time.time() + timing_results["更新关系情绪"] = timer2 - timer1 + except Exception as e: + logger.error(f"心流更新关系情绪失败: {e}") + + except Exception as e: + logger.error(f"心流处理消息失败: {e}") # 输出性能计时结果 if do_reply: From 4a439eb502af0b8c9d82d25cf172d668953ab5fd Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 21:07:48 +0800 Subject: [PATCH 33/51] Update changelog_dev.md --- changelogs/changelog_dev.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/changelogs/changelog_dev.md b/changelogs/changelog_dev.md index 3a5f9740f..663ad9629 100644 --- a/changelogs/changelog_dev.md +++ b/changelogs/changelog_dev.md @@ -3,6 +3,9 @@ ## [test-0.6.1-snapshot-1] - 2025-4-5 - 修复pfc回复出错bug - 修复表情包打字时间,不会卡表情包 +- 改进了知识库的提取 +- 提供了新的数据库连接方式 +- 修复了ban_user无效的问题 ## [test-0.6.0-snapshot-9] - 2025-4-4 - 可以识别gif表情包 From 1f1ab2697dda70865d62db910b3e59be2f631f34 Mon Sep 17 00:00:00 2001 From: meng_xi_pan <1903647908@qq.com> Date: Sat, 5 Apr 2025 21:20:29 +0800 Subject: [PATCH 34/51] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=80=E4=B8=AArelat?= =?UTF-8?q?ion=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/moods/moods.py | 2 +- src/plugins/person_info/relationship_manager.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/plugins/moods/moods.py b/src/plugins/moods/moods.py index 98fd61952..3d3feadf1 100644 --- a/src/plugins/moods/moods.py +++ b/src/plugins/moods/moods.py @@ -237,7 +237,7 @@ class MoodManager: old_arousal = self.current_mood.arousal old_mood = self.current_mood.text - valence_change *= 
relationship_manager.gain_coefficient[relationship_manager.positive_feedback_value] + valence_change = relationship_manager.feedback_to_mood(valence_change) # 应用情绪强度 valence_change *= intensity diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py index 707dbbe51..9bbcf4e19 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/plugins/person_info/relationship_manager.py @@ -63,7 +63,15 @@ class RelationshipManager: value += value * mood_gain logger.info(f"当前relationship增益系数:{mood_gain:.3f}") return value - + + def feedback_to_mood(self, mood_value): + """对情绪的反馈""" + coefficient = self.gain_coefficient[abs(self.positive_feedback_value)] + if (mood_value > 0 and self.positive_feedback_value > 0 + or mood_value < 0 and self.positive_feedback_value < 0): + return mood_value*coefficient + else: + return mood_value/coefficient async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> None: """计算并变更关系值 From 47aa37eecb964624b792fc700bb3976d3511c0cb Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 23:11:24 +0800 Subject: [PATCH 35/51] Update .gitignore --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 34c7b1e28..d46fb033f 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ logs/ nonebot-maibot-adapter/ *.zip run.bat +run_none.bat run.py message_queue_content.txt message_queue_content.bat @@ -230,4 +231,6 @@ logs .vscode -/config/* \ No newline at end of file +/config/* +run_none.bat +config/old/bot_config_20250405_212257.toml From 0acad09dd7197c53afed53f6b6456e94a4fc3e7b Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Apr 2025 23:44:03 +0800 Subject: [PATCH 36/51] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E6=94=B9=E5=88=B00.?= =?UTF-8?q?6.1-2=EF=BC=8C=E5=87=86=E5=A4=87=E5=BC=80=E5=A7=8B=E4=BA=BA?= =?UTF-8?q?=E6=A0=BC2.0=EF=BC=8C=E7=9F=A5=E8=AF=86=E5=BA=93=E5=8E=9F?= =?UTF-8?q?=E5=A7=8B=E7=89=88=E6=9C=AC=E9=99=90=E6=97=B6=E8=BF=94=E5=9C=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/config/config.py | 13 +++++-- template/bot_config_template.toml | 2 +- 麦麦开始学习(测试临时版).bat | 56 +++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 4 deletions(-) create mode 100644 麦麦开始学习(测试临时版).bat diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py index b1e6299d5..5b58f2d52 100644 --- a/src/plugins/config/config.py +++ b/src/plugins/config/config.py @@ -25,12 +25,19 @@ config_config = LogConfig( logger = get_module_logger("config", config=config_config) #考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 +is_test = True mai_version_main = "0.6.1" -mai_version_fix = "snapshot-1" +mai_version_fix = "snapshot-2" if mai_version_fix: - mai_version = f"{mai_version_main}-{mai_version_fix}" + if is_test: + mai_version = f"test-{mai_version_main}-{mai_version_fix}" + else: + mai_version = f"{mai_version_main}-{mai_version_fix}" else: - mai_version = mai_version_main + if is_test: + mai_version = f"test-{mai_version_main}" + else: + mai_version = mai_version_main def update_config(): # 获取根目录路径 diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 1ab846203..d7ec90cd1 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "1.1.4" +version = "1.2.4" #以下是给开发人员阅读的,一般用户不需要阅读 diff --git 
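# The new RelationshipManager.feedback_to_mood above keeps the sign of the mood
# change and only rescales its magnitude: when the change points the same way as
# the accumulated positive_feedback_value, the gain coefficient amplifies it;
# otherwise it dampens it. A standalone sketch of that rule, where the coefficient
# value 1.5 is an illustrative assumption rather than the real gain table:

def feedback_to_mood_sketch(mood_value: float, positive_feedback_value: float,
                            coefficient: float = 1.5) -> float:
    """Amplify on sign agreement, dampen on disagreement (mirrors the patched method)."""
    same_direction = (mood_value > 0 and positive_feedback_value > 0) or \
                     (mood_value < 0 and positive_feedback_value < 0)
    return mood_value * coefficient if same_direction else mood_value / coefficient

# With coefficient = 1.5:
#   feedback_to_mood_sketch( 0.4,  2.0) ->  0.6    (agreement, amplified)
#   feedback_to_mood_sketch( 0.4, -2.0) ->  0.266… (disagreement, dampened)
#   feedback_to_mood_sketch(-0.4, -2.0) -> -0.6    (agreement, amplified)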
a/麦麦开始学习(测试临时版).bat b/麦麦开始学习(测试临时版).bat new file mode 100644 index 000000000..f96d7cfdc --- /dev/null +++ b/麦麦开始学习(测试临时版).bat @@ -0,0 +1,56 @@ +@echo off +chcp 65001 > nul +setlocal enabledelayedexpansion +cd /d %~dp0 + +title 麦麦学习系统 + +cls +echo ====================================== +echo 警告提示 +echo ====================================== +echo 1.这是一个demo系统,不完善不稳定,仅用于体验/不要塞入过长过大的文本,这会导致信息提取迟缓 +echo ====================================== + +echo. +echo ====================================== +echo 请选择Python环境: +echo 1 - venv (推荐) +echo 2 - conda +echo ====================================== +choice /c 12 /n /m "请输入数字选择(1或2): " + +if errorlevel 2 ( + echo ====================================== + set "CONDA_ENV=" + set /p CONDA_ENV="请输入要激活的 conda 环境名称: " + + :: 检查输入是否为空 + if "!CONDA_ENV!"=="" ( + echo 错误:环境名称不能为空 + pause + exit /b 1 + ) + + call conda activate !CONDA_ENV! + if errorlevel 1 ( + echo 激活 conda 环境失败 + pause + exit /b 1 + ) + + echo Conda 环境 "!CONDA_ENV!" 激活成功 + python src/plugins/zhishi/knowledge_library.py +) else ( + if exist "venv\Scripts\python.exe" ( + venv\Scripts\python src/plugins/zhishi/knowledge_library.py + ) else ( + echo ====================================== + echo 错误: venv环境不存在,请先创建虚拟环境 + pause + exit /b 1 + ) +) + +endlocal +pause From 80753d95a5bc0c12792ef0c438ad1a75f99d88e1 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 00:58:46 +0800 Subject: [PATCH 37/51] =?UTF-8?q?move=EF=BC=9A=E4=BF=AE=E6=94=B9=E4=BA=BA?= =?UTF-8?q?=E6=A0=BC=E6=96=87=E4=BB=B6=E7=BB=93=E6=9E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/personality/offline_llm.py | 123 ++++++++++++ src/personality/personality.py | 32 +++ src/personality/personality_gen.py | 186 ++++++++++++++++++ src/plugins/config/config.py | 59 ++++-- .../big5_test.py | 0 .../can_i_recog_u.py | 0 .../combined_test.py | 0 .../offline_llm.py | 0 .../questionnaire.py | 0 .../renqingziji.py | 0 .../renqingziji_with_mymy.py | 0 .../{personality => personality_s}/scene.py | 0 .../{personality => personality_s}/who_r_u.py | 0 .../{personality => personality_s}/看我.txt | 0 template/bot_config_template.toml | 29 ++- 15 files changed, 406 insertions(+), 23 deletions(-) create mode 100644 src/personality/offline_llm.py create mode 100644 src/personality/personality.py create mode 100644 src/personality/personality_gen.py rename src/plugins/{personality => personality_s}/big5_test.py (100%) rename src/plugins/{personality => personality_s}/can_i_recog_u.py (100%) rename src/plugins/{personality => personality_s}/combined_test.py (100%) rename src/plugins/{personality => personality_s}/offline_llm.py (100%) rename src/plugins/{personality => personality_s}/questionnaire.py (100%) rename src/plugins/{personality => personality_s}/renqingziji.py (100%) rename src/plugins/{personality => personality_s}/renqingziji_with_mymy.py (100%) rename src/plugins/{personality => personality_s}/scene.py (100%) rename src/plugins/{personality => personality_s}/who_r_u.py (100%) rename src/plugins/{personality => personality_s}/看我.txt (100%) diff --git a/src/personality/offline_llm.py b/src/personality/offline_llm.py new file mode 100644 index 000000000..8d6820651 --- /dev/null +++ b/src/personality/offline_llm.py @@ -0,0 +1,123 @@ +import asyncio +import os +import time +from typing import Tuple, Union + +import aiohttp +import requests +from src.common.logger import get_module_logger + +logger = get_module_logger("offline_llm") + + +class LLM_request_off: + 
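# The hard-coded version logic added to config.py earlier in this patch combines
# mai_version_main, mai_version_fix and the new is_test flag through nested
# branches. A compact sketch of the same rule and the strings it produces
# (format_mai_version is an illustrative helper, not part of this patch):

def format_mai_version(main: str, fix: str = "", is_test: bool = False) -> str:
    """Append the fix suffix when present, then prepend 'test-' when is_test is set."""
    version = f"{main}-{fix}" if fix else main
    return f"test-{version}" if is_test else version

# format_mai_version("0.6.1", "snapshot-2", is_test=True)  -> "test-0.6.1-snapshot-2"
# format_mai_version("0.6.1", "snapshot-2")                -> "0.6.1-snapshot-2"
# format_mai_version("0.6.1")                              -> "0.6.1"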
def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs): + self.model_name = model_name + self.params = kwargs + self.api_key = os.getenv("SILICONFLOW_KEY") + self.base_url = os.getenv("SILICONFLOW_BASE_URL") + + if not self.api_key or not self.base_url: + raise ValueError("环境变量未正确加载:SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置") + + logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url + + def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]: + """根据输入的提示生成模型的响应""" + headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"} + + # 构建请求体 + data = { + "model": self.model_name, + "messages": [{"role": "user", "content": prompt}], + "temperature": 0.5, + **self.params, + } + + # 发送请求到完整的 chat/completions 端点 + api_url = f"{self.base_url.rstrip('/')}/chat/completions" + logger.info(f"Request URL: {api_url}") # 记录请求的 URL + + max_retries = 3 + base_wait_time = 15 # 基础等待时间(秒) + + for retry in range(max_retries): + try: + response = requests.post(api_url, headers=headers, json=data) + + if response.status_code == 429: + wait_time = base_wait_time * (2**retry) # 指数退避 + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") + time.sleep(wait_time) + continue + + response.raise_for_status() # 检查其他响应状态 + + result = response.json() + if "choices" in result and len(result["choices"]) > 0: + content = result["choices"][0]["message"]["content"] + reasoning_content = result["choices"][0]["message"].get("reasoning_content", "") + return content, reasoning_content + return "没有返回结果", "" + + except Exception as e: + if retry < max_retries - 1: # 如果还有重试机会 + wait_time = base_wait_time * (2**retry) + logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + time.sleep(wait_time) + else: + logger.error(f"请求失败: {str(e)}") + return f"请求失败: {str(e)}", "" + + logger.error("达到最大重试次数,请求仍然失败") + return "达到最大重试次数,请求仍然失败", "" + + async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]: + """异步方式根据输入的提示生成模型的响应""" + headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"} + + # 构建请求体 + data = { + "model": self.model_name, + "messages": [{"role": "user", "content": prompt}], + "temperature": 0.5, + **self.params, + } + + # 发送请求到完整的 chat/completions 端点 + api_url = f"{self.base_url.rstrip('/')}/chat/completions" + logger.info(f"Request URL: {api_url}") # 记录请求的 URL + + max_retries = 3 + base_wait_time = 15 + + async with aiohttp.ClientSession() as session: + for retry in range(max_retries): + try: + async with session.post(api_url, headers=headers, json=data) as response: + if response.status == 429: + wait_time = base_wait_time * (2**retry) # 指数退避 + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") + await asyncio.sleep(wait_time) + continue + + response.raise_for_status() # 检查其他响应状态 + + result = await response.json() + if "choices" in result and len(result["choices"]) > 0: + content = result["choices"][0]["message"]["content"] + reasoning_content = result["choices"][0]["message"].get("reasoning_content", "") + return content, reasoning_content + return "没有返回结果", "" + + except Exception as e: + if retry < max_retries - 1: # 如果还有重试机会 + wait_time = base_wait_time * (2**retry) + logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 
错误: {str(e)}") + await asyncio.sleep(wait_time) + else: + logger.error(f"请求失败: {str(e)}") + return f"请求失败: {str(e)}", "" + + logger.error("达到最大重试次数,请求仍然失败") + return "达到最大重试次数,请求仍然失败", "" diff --git a/src/personality/personality.py b/src/personality/personality.py new file mode 100644 index 000000000..3977743a5 --- /dev/null +++ b/src/personality/personality.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass +from typing import Dict, List + +@dataclass +class Personality: + """人格特质类""" + openness: float # 开放性 + conscientiousness: float # 尽责性 + extraversion: float # 外向性 + agreeableness: float # 宜人性 + neuroticism: float # 神经质 + bot_nickname: str # 机器人昵称 + personality_core: str # 人格核心特点 + personality_detail: List[str] # 人格细节描述 + + def to_dict(self) -> Dict: + """将人格特质转换为字典格式""" + return { + "openness": self.openness, + "conscientiousness": self.conscientiousness, + "extraversion": self.extraversion, + "agreeableness": self.agreeableness, + "neuroticism": self.neuroticism, + "bot_nickname": self.bot_nickname, + "personality_core": self.personality_core, + "personality_detail": self.personality_detail + } + + @classmethod + def from_dict(cls, data: Dict) -> 'Personality': + """从字典创建人格特质实例""" + return cls(**data) \ No newline at end of file diff --git a/src/personality/personality_gen.py b/src/personality/personality_gen.py new file mode 100644 index 000000000..8eaf99db0 --- /dev/null +++ b/src/personality/personality_gen.py @@ -0,0 +1,186 @@ +import os +import json +import sys +from typing import Optional, List + +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) +from src.personality.offline_llm import LLM_request_off +from src.common.logger import get_module_logger +from src.personality.personality import Personality + +logger = get_module_logger("personality_gen") + +class PersonalityGenerator: + """人格生成器类""" + def __init__(self, bot_nickname: str): + self.bot_nickname = bot_nickname + self.llm = LLM_request_off() + self.personality: Optional[Personality] = None + self.save_path = os.path.join("data", "personality") + + # 确保保存目录存在 + os.makedirs(self.save_path, exist_ok=True) + + def personality_exists(self) -> bool: + """检查是否已存在该机器人的人格文件""" + file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") + return os.path.exists(file_path) + + async def generate_personality( + self, + personality_core: str, + personality_detail: List[str], + height: int, + weight: int, + age: int, + gender: str, + appearance: str, + interests: List[str], + others: List[str] + ) -> Optional[Personality]: + """根据配置生成人格特质""" + # 检查是否已存在 + if self.personality_exists(): + logger.info(f"机器人 {self.bot_nickname} 的人格文件已存在,跳过生成") + return await self.load_personality() + + # 构建提示文本 + prompt = f"""你是一个心理学家,专职心理测量和大五人格研究。请根据以下信息分析并给出这个人的大五人格特质评分。 +每个特质的分数范围是0到1之间的小数,请确保返回标准的JSON格式。 + +机器人信息: +- 昵称:{self.bot_nickname} +- 性格核心的特质:{personality_core} +- 性格细节:{', '.join(personality_detail)} +- 身高:{height}cm +- 体重:{weight}kg +- 年龄:{age}岁 +- 性别:{gender} +- 外貌:{appearance} +- 兴趣爱好:{', '.join(interests)} +- 其他信息:{', '.join(others)} +请只返回如下JSON格式数据(不要包含任何其他文字): +{{ + "openness": 0.x, + "conscientiousness": 0.x, + "extraversion": 0.x, + "agreeableness": 0.x, + "neuroticism": 0.x +}}""" + + response, _ = await self.llm.generate_response_async(prompt) + try: + # 尝试清理响应文本,只保留JSON部分 + json_str = response.strip() + if "```json" in json_str: + json_str = json_str.split("```json")[1].split("```")[0].strip() + elif "```" in json_str: + json_str = json_str.split("```")[1].strip() + + traits = 
json.loads(json_str) + + # 验证所有必需的字段是否存在 + required_fields = ["openness", "conscientiousness", "extraversion", "agreeableness", "neuroticism"] + if not all(field in traits for field in required_fields): + raise ValueError("缺少必需的人格特质字段") + + # 验证数值是否在合理范围内 + for field in required_fields: + if not 0 <= traits[field] <= 1: + traits[field] = max(0, min(traits[field], 1)) + + self.personality = Personality( + **traits, + bot_nickname=self.bot_nickname + ) + await self.save_personality() + return self.personality + + except json.JSONDecodeError as e: + logger.error(f"JSON解析失败: {e}\n响应内容: {response}") + raise + except Exception as e: + logger.error(f"生成人格特质失败: {e}") + raise + + async def save_personality(self) -> None: + """保存人格特质到文件""" + if not self.personality: + raise ValueError("没有可保存的人格特质") + + file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") + try: + with open(file_path, 'w', encoding='utf-8') as f: + json.dump(self.personality.to_dict(), f, ensure_ascii=False, indent=4) + logger.info(f"人格特质已保存到: {file_path}") + except Exception as e: + logger.error(f"保存人格特质失败: {e}") + raise + + async def load_personality(self) -> Optional[Personality]: + """从文件加载人格特质""" + file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") + try: + if os.path.exists(file_path): + with open(file_path, 'r', encoding='utf-8') as f: + data = json.load(f) + self.personality = Personality.from_dict(data) + return self.personality + except Exception as e: + logger.error(f"加载人格特质失败: {e}") + return None + +async def main(): + """主函数,用于测试人格生成""" + # 创建人格生成器实例 + generator = PersonalityGenerator("麦麦") + + # 生成或加载人格 + personality = await generator.generate_personality( + personality_core="对世界抱着善意和好奇,愿意尝试新奇事物", + personality_detail=[ + "你会刷小红书", + "你会刷贴吧", + "学习心理学和脑科学", + "你会刷b站,对ACG文化感兴趣", + "有时候有些搞怪", + ], + height=160, + weight=45, + age=20, + gender="女", + appearance="有着橙色短发", + interests=["摄影", "绘画"], + others=["是一个大二女大学生"] + ) + + if personality: + logger.info("人格特质生成成功:") + logger.info(f"开放性: {personality.openness}") + logger.info(f"尽责性: {personality.conscientiousness}") + logger.info(f"外向性: {personality.extraversion}") + logger.info(f"宜人性: {personality.agreeableness}") + logger.info(f"神经质: {personality.neuroticism}") + else: + logger.error("人格特质生成失败") + +if __name__ == "__main__": + import asyncio + import platform + + if platform.system() == 'Windows': + # Windows平台特殊处理 + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + try: + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) + finally: + # 确保所有待处理的任务都完成 + pending = asyncio.all_tasks(loop) + for task in pending: + task.cancel() + + # 运行一次以处理取消的任务 + loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True)) + loop.close() diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py index 5b58f2d52..83314c082 100644 --- a/src/plugins/config/config.py +++ b/src/plugins/config/config.py @@ -148,14 +148,36 @@ class BotConfig: ban_user_id = set() # personality - PROMPT_PERSONALITY = [ - "用一句话或几句话描述性格特点和其他特征", - "例如,是一个热爱国家热爱党的新时代好青年", - "例如,曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧", - ] - PERSONALITY_1: float = 0.6 # 第一种人格概率 - PERSONALITY_2: float = 0.3 # 第二种人格概率 - PERSONALITY_3: float = 0.1 # 第三种人格概率 + personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内,谁再写3000字小作文敲谁脑袋 + personality_detail: List[str] = field(default_factory=lambda: [ + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节" + ]) + + traits: 
List[str] = field(default_factory=lambda: [ + "用一个词描述性格", + "用一个词描述性格", + "用一个词描述性格", + ]) + + # identity + identity_detail: List[str] = field(default_factory=lambda: [ + "身份特点", + "身份特点", + ]) + height: int = 170 # 身高 单位厘米 + weight: int = 50 # 体重 单位千克 + age: int = 20 # 年龄 单位岁 + gender: str = "男" # 性别 + appearance: str = "用几句话描述外貌特征" # 外貌特征 + interests: List[str] = field(default_factory=lambda: [ + "兴趣爱好1", + "兴趣爱好2", + "兴趣爱好3" + ]) # schedule ENABLE_SCHEDULE_GEN: bool = False # 是否启用日程生成 @@ -347,14 +369,20 @@ class BotConfig: def personality(parent: dict): personality_config = parent["personality"] - personality = personality_config.get("prompt_personality") - if len(personality) >= 2: - logger.info(f"载入自定义人格:{personality}") - config.PROMPT_PERSONALITY = personality_config.get("prompt_personality", config.PROMPT_PERSONALITY) + if config.INNER_VERSION in SpecifierSet(">=1.2.4"): + config.personality_core = personality_config.get("personality_core", config.personality_core) + config.personality_detail = personality_config.get("personality_detail", config.personality_detail) - config.PERSONALITY_1 = personality_config.get("personality_1_probability", config.PERSONALITY_1) - config.PERSONALITY_2 = personality_config.get("personality_2_probability", config.PERSONALITY_2) - config.PERSONALITY_3 = personality_config.get("personality_3_probability", config.PERSONALITY_3) + def identity(parent: dict): + identity_config = parent["identity"] + if config.INNER_VERSION in SpecifierSet(">=1.2.4"): + config.identity_detail = identity_config.get("identity_detail", config.identity_detail) + config.height = identity_config.get("height", config.height) + config.weight = identity_config.get("weight", config.weight) + config.age = identity_config.get("age", config.age) + config.gender = identity_config.get("gender", config.gender) + config.appearance = identity_config.get("appearance", config.appearance) + config.interests = identity_config.get("interests", config.interests) def schedule(parent: dict): schedule_config = parent["schedule"] @@ -611,6 +639,7 @@ class BotConfig: "bot": {"func": bot, "support": ">=0.0.0"}, "groups": {"func": groups, "support": ">=0.0.0"}, "personality": {"func": personality, "support": ">=0.0.0"}, + "identity": {"func": identity, "support": ">=1.2.4"}, "schedule": {"func": schedule, "support": ">=0.0.11", "necessary": False}, "message": {"func": message, "support": ">=0.0.0"}, "willing": {"func": willing, "support": ">=0.0.9", "necessary": False}, diff --git a/src/plugins/personality/big5_test.py b/src/plugins/personality_s/big5_test.py similarity index 100% rename from src/plugins/personality/big5_test.py rename to src/plugins/personality_s/big5_test.py diff --git a/src/plugins/personality/can_i_recog_u.py b/src/plugins/personality_s/can_i_recog_u.py similarity index 100% rename from src/plugins/personality/can_i_recog_u.py rename to src/plugins/personality_s/can_i_recog_u.py diff --git a/src/plugins/personality/combined_test.py b/src/plugins/personality_s/combined_test.py similarity index 100% rename from src/plugins/personality/combined_test.py rename to src/plugins/personality_s/combined_test.py diff --git a/src/plugins/personality/offline_llm.py b/src/plugins/personality_s/offline_llm.py similarity index 100% rename from src/plugins/personality/offline_llm.py rename to src/plugins/personality_s/offline_llm.py diff --git a/src/plugins/personality/questionnaire.py b/src/plugins/personality_s/questionnaire.py similarity index 100% rename from 
src/plugins/personality/questionnaire.py rename to src/plugins/personality_s/questionnaire.py diff --git a/src/plugins/personality/renqingziji.py b/src/plugins/personality_s/renqingziji.py similarity index 100% rename from src/plugins/personality/renqingziji.py rename to src/plugins/personality_s/renqingziji.py diff --git a/src/plugins/personality/renqingziji_with_mymy.py b/src/plugins/personality_s/renqingziji_with_mymy.py similarity index 100% rename from src/plugins/personality/renqingziji_with_mymy.py rename to src/plugins/personality_s/renqingziji_with_mymy.py diff --git a/src/plugins/personality/scene.py b/src/plugins/personality_s/scene.py similarity index 100% rename from src/plugins/personality/scene.py rename to src/plugins/personality_s/scene.py diff --git a/src/plugins/personality/who_r_u.py b/src/plugins/personality_s/who_r_u.py similarity index 100% rename from src/plugins/personality/who_r_u.py rename to src/plugins/personality_s/who_r_u.py diff --git a/src/plugins/personality/看我.txt b/src/plugins/personality_s/看我.txt similarity index 100% rename from src/plugins/personality/看我.txt rename to src/plugins/personality_s/看我.txt diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index d7ec90cd1..c40c03dfd 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -34,14 +34,27 @@ talk_frequency_down = [] #降低回复频率的群号码 ban_user_id = [] #禁止回复和读取消息的QQ号 [personality] -prompt_personality = [ - "用一句话或几句话描述性格特点和其他特征", - "例如,是一个热爱国家热爱党的新时代好青年", - "例如,曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧" - ] -personality_1_probability = 0.7 # 第一种人格出现概率 -personality_2_probability = 0.2 # 第二种人格出现概率,可以为0 -personality_3_probability = 0.1 # 第三种人格出现概率,请确保三个概率相加等于1 +personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内,谁再写3000字小作文敲谁脑袋 +personality_detail = [ + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", + "用一句话或几句话描述人格的一些细节", +]# 条数任意 + +[identity] #アイデンティティがない 生まれないらららら +# 兴趣爱好 +identity_detail = [ + "身份特点", + "身份特点", +]# 条数任意 +#外貌特征 +height = 170 # 身高 单位厘米 +weight = 50 # 体重 单位千克 +age = 20 # 年龄 单位岁 +gender = "男" # 性别 +appearance = "用几句话描述外貌特征" # 外貌特征 [schedule] enable_schedule_gen = true # 是否启用日程表(尚未完成) From f3dfee22162cfc05559cbf5be28243672e1e4889 Mon Sep 17 00:00:00 2001 From: meng_xi_pan Date: Sun, 6 Apr 2025 02:42:03 +0800 Subject: [PATCH 38/51] =?UTF-8?q?group=5Finfo=E4=B8=BA=E7=A9=BA=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/chat/message_buffer.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py index 1d0291a1c..a87ed4e9d 100644 --- a/src/plugins/chat/message_buffer.py +++ b/src/plugins/chat/message_buffer.py @@ -3,7 +3,7 @@ from src.common.logger import get_module_logger import asyncio from dataclasses import dataclass, field from .message import MessageRecv -from ..message.message_base import BaseMessageInfo +from ..message.message_base import BaseMessageInfo, GroupInfo import hashlib from typing import Dict from collections import OrderedDict @@ -25,9 +25,12 @@ class MessageBuffer: self.buffer_pool: Dict[str, OrderedDict[str, CacheMessages]] = {} self.lock = asyncio.Lock() - def get_person_id_(self, platform:str, user_id:str, group_id:str): + def get_person_id_(self, platform:str, user_id:str, group_info:GroupInfo): """获取唯一id""" - group_id = group_id or "私聊" + if group_info: + group_id = 
group_info.group_id + else: + group_id = "私聊" key = f"{platform}_{user_id}_{group_id}" return hashlib.md5(key.encode()).hexdigest() @@ -40,7 +43,7 @@ class MessageBuffer: return person_id_ = self.get_person_id_(message.message_info.platform, message.message_info.user_info.user_id, - message.message_info.group_info.group_id) + message.message_info.group_info) async with self.lock: if person_id_ not in self.buffer_pool: @@ -108,7 +111,7 @@ class MessageBuffer: return True person_id_ = self.get_person_id_(message.message_info.platform, message.message_info.user_info.user_id, - message.message_info.group_info.group_id) + message.message_info.group_info) async with self.lock: From 594c30ab7402ce1bd0fee70dc760aaad222ae485 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 11:00:59 +0800 Subject: [PATCH 39/51] =?UTF-8?q?feat=EF=BC=9A=E6=96=B0=E7=9A=84=E4=BA=BA?= =?UTF-8?q?=E6=A0=BC=E7=BB=93=E6=9E=84=EF=BC=8C=E5=AE=8C=E6=88=9020%?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/heart_flow/heartflow.py | 45 ++- src/heart_flow/observation.py | 28 +- src/heart_flow/sub_heartflow.py | 65 ++++- src/individuality/identity.py | 95 +++++++ src/individuality/individuality.py | 77 ++++++ .../offline_llm.py | 0 src/individuality/personality.py | 119 ++++++++ .../renqingziji_with_mymy.py | 0 src/individuality/scene.py | 40 +++ src/individuality/template_scene.json | 112 ++++++++ src/main.py | 18 +- src/personality/personality.py | 32 --- src/personality/personality_gen.py | 186 ------------- .../reasoning_prompt_builder.py | 35 +-- .../think_flow_prompt_builder.py | 98 +------ src/plugins/config/config.py | 25 +- src/plugins/personality_s/scene.py | 261 ------------------ src/plugins/schedule/schedule_generator.py | 4 +- template/bot_config_template.toml | 2 +- 从0.6.0升级0.6.1请先看我.txt | 1 + 20 files changed, 629 insertions(+), 614 deletions(-) create mode 100644 src/individuality/identity.py create mode 100644 src/individuality/individuality.py rename src/{personality => individuality}/offline_llm.py (100%) create mode 100644 src/individuality/personality.py rename src/{plugins/personality_s => individuality}/renqingziji_with_mymy.py (100%) create mode 100644 src/individuality/scene.py create mode 100644 src/individuality/template_scene.json delete mode 100644 src/personality/personality.py delete mode 100644 src/personality/personality_gen.py delete mode 100644 src/plugins/personality_s/scene.py create mode 100644 从0.6.0升级0.6.1请先看我.txt diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py index 2d0326384..8f582fb07 100644 --- a/src/heart_flow/heartflow.py +++ b/src/heart_flow/heartflow.py @@ -6,7 +6,9 @@ from src.plugins.config.config import global_config from src.plugins.schedule.schedule_generator import bot_schedule import asyncio from src.common.logger import get_module_logger, LogConfig, HEARTFLOW_STYLE_CONFIG # noqa: E402 +from src.individuality.individuality import Individuality import time +import random heartflow_config = LogConfig( # 使用海马体专用样式 @@ -40,7 +42,6 @@ class Heartflow: self._subheartflows = {} self.active_subheartflows_nums = 0 - self.personality_info = " ".join(global_config.PROMPT_PERSONALITY) async def _cleanup_inactive_subheartflows(self): """定期清理不活跃的子心流""" @@ -81,7 +82,27 @@ class Heartflow: logger.debug("麦麦大脑袋转起来了") self.current_state.update_current_state_info() - personality_info = self.personality_info + # 开始构建prompt + prompt_personality = "你" + #person + individuality = 
Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + + personality_info = prompt_personality + + current_thinking_info = self.current_mind mood_info = self.current_state.mood related_memory_info = "memory" @@ -123,7 +144,25 @@ class Heartflow: return await self.minds_summary(sub_minds) async def minds_summary(self, minds_str): - personality_info = self.personality_info + # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + + personality_info = prompt_personality mood_info = self.current_state.mood prompt = "" diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py index 09af33c41..f4a082a4e 100644 --- a/src/heart_flow/observation.py +++ b/src/heart_flow/observation.py @@ -4,7 +4,8 @@ from datetime import datetime from src.plugins.models.utils_model import LLM_request from src.plugins.config.config import global_config from src.common.database import db - +from src.individuality.individuality import Individuality +import random # 所有观察的基类 class Observation: @@ -24,7 +25,6 @@ class ChattingObservation(Observation): self.talking_message = [] self.talking_message_str = "" - self.personality_info = " ".join(global_config.PROMPT_PERSONALITY) self.name = global_config.BOT_NICKNAME self.nick_name = global_config.BOT_ALIAS_NAMES @@ -115,8 +115,30 @@ class ChattingObservation(Observation): async def update_talking_summary(self, new_messages_str): # 基于已经有的talking_summary,和新的talking_message,生成一个summary # print(f"更新聊天总结:{self.talking_summary}") + # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + + personality_info = prompt_personality + + + prompt = "" - prompt += f"你{self.personality_info},请注意识别你自己的聊天发言" + prompt += f"{personality_info},请注意识别你自己的聊天发言" prompt += f"你的名字叫:{self.name},你的昵称是:{self.nick_name}\n" prompt += f"你正在参与一个qq群聊的讨论,你记得这个群之前在聊的内容是:{self.observe_info}\n" prompt += f"现在群里的群友们产生了新的讨论,有了新的发言,具体内容如下:{new_messages_str}\n" diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py index 1312b7aae..b6a0fb30e 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/heart_flow/sub_heartflow.py @@ -11,6 +11,8 @@ from src.common.logger import get_module_logger, LogConfig, SUB_HEARTFLOW_STYLE_ from src.plugins.chat.utils import get_embedding from src.common.database import db from 
typing import Union +from src.individuality.individuality import Individuality +import random subheartflow_config = LogConfig( # 使用海马体专用样式 @@ -51,7 +53,6 @@ class SubHeartflow: if not self.current_mind: self.current_mind = "你什么也没想" - self.personality_info = " ".join(global_config.PROMPT_PERSONALITY) self.is_active = False @@ -159,6 +160,25 @@ class SubHeartflow: chat_observe_info = observation.observe_info # print(f"chat_observe_info:{chat_observe_info}") + # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + + # 调取记忆 related_memory = await HippocampusManager.get_instance().get_memory_from_text( text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False @@ -184,7 +204,7 @@ class SubHeartflow: prompt = "" # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n" - prompt += f"你{self.personality_info}\n" + prompt += f"{prompt_personality}\n" prompt += f"你刚刚在做的事情是:{schedule_info}\n" if related_memory_info: prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" @@ -207,6 +227,25 @@ class SubHeartflow: async def do_thinking_after_reply(self, reply_content, chat_talking_prompt): # print("麦麦回复之后脑袋转起来了") + + # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + current_thinking_info = self.current_mind mood_info = self.current_state.mood @@ -219,7 +258,7 @@ class SubHeartflow: prompt = "" # prompt += f"你现在正在做的事情是:{schedule_info}\n" - prompt += f"你{self.personality_info}\n" + prompt += f"{prompt_personality}\n" prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n" prompt += f"刚刚你的想法是{current_thinking_info}。" prompt += f"你现在看到了网友们发的新消息:{message_new_info}\n" @@ -238,12 +277,30 @@ class SubHeartflow: self.last_reply_time = time.time() async def judge_willing(self): + # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + # print("麦麦闹情绪了1") current_thinking_info = self.current_mind mood_info = self.current_state.mood # print("麦麦闹情绪了2") prompt = "" - prompt += f"{self.personality_info}\n" + prompt += f"{prompt_personality}\n" prompt += "现在你正在上网,和qq群里的网友们聊天" prompt += f"你现在的想法是{current_thinking_info}。" prompt += f"你现在{mood_info}。" diff --git a/src/individuality/identity.py 
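# The same prompt_personality construction (core trait plus one shuffled
# personality side plus one shuffled identity detail) is now copied into
# heartflow.py, observation.py, three methods of sub_heartflow.py above, and
# again in both prompt builders later in this patch. A sketch of centralizing it;
# the helper name get_prompt_personality is hypothetical, and random.choice is
# used so the shared config-backed lists are not shuffled in place:

import random

def get_prompt_personality(individuality) -> str:
    """Build the '你…' personality snippet the way the copied blocks do:
    personality_core, plus one random personality side and one identity detail."""
    prompt = "你" + individuality.personality.personality_core
    sides = individuality.personality.personality_sides
    if sides:
        prompt += f",{random.choice(sides)}"
    details = individuality.identity.identity_detail
    if details:
        prompt += f",{random.choice(details)}"
    return prompt

# Usage sketch (replacing each copied block):
#   prompt_personality = get_prompt_personality(Individuality.get_instance())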
b/src/individuality/identity.py new file mode 100644 index 000000000..287f9e5d5 --- /dev/null +++ b/src/individuality/identity.py @@ -0,0 +1,95 @@ +from dataclasses import dataclass +from typing import List + +@dataclass +class Identity: + """身份特征类""" + identity_detail: List[str] # 身份细节描述 + height: int # 身高(厘米) + weight: int # 体重(千克) + age: int # 年龄 + gender: str # 性别 + appearance: str # 外貌特征 + + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self, identity_detail: List[str] = None, height: int = 0, weight: int = 0, + age: int = 0, gender: str = "", appearance: str = ""): + """初始化身份特征 + + Args: + identity_detail: 身份细节描述列表 + height: 身高(厘米) + weight: 体重(千克) + age: 年龄 + gender: 性别 + appearance: 外貌特征 + """ + if identity_detail is None: + identity_detail = [] + self.identity_detail = identity_detail + self.height = height + self.weight = weight + self.age = age + self.gender = gender + self.appearance = appearance + + @classmethod + def get_instance(cls) -> 'Identity': + """获取Identity单例实例 + + Returns: + Identity: 单例实例 + """ + if cls._instance is None: + cls._instance = cls() + return cls._instance + + @classmethod + def initialize(cls, identity_detail: List[str], height: int, weight: int, + age: int, gender: str, appearance: str) -> 'Identity': + """初始化身份特征 + + Args: + identity_detail: 身份细节描述列表 + height: 身高(厘米) + weight: 体重(千克) + age: 年龄 + gender: 性别 + appearance: 外貌特征 + + Returns: + Identity: 初始化后的身份特征实例 + """ + instance = cls.get_instance() + instance.identity_detail = identity_detail + instance.height = height + instance.weight = weight + instance.age = age + instance.gender = gender + instance.appearance = appearance + return instance + + def to_dict(self) -> dict: + """将身份特征转换为字典格式""" + return { + "identity_detail": self.identity_detail, + "height": self.height, + "weight": self.weight, + "age": self.age, + "gender": self.gender, + "appearance": self.appearance + } + + @classmethod + def from_dict(cls, data: dict) -> 'Identity': + """从字典创建身份特征实例""" + instance = cls.get_instance() + for key, value in data.items(): + setattr(instance, key, value) + return instance \ No newline at end of file diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py new file mode 100644 index 000000000..899de62e7 --- /dev/null +++ b/src/individuality/individuality.py @@ -0,0 +1,77 @@ +from typing import Optional +from .personality import Personality +from .identity import Identity + +class Individuality: + """个体特征管理类""" + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self): + self.personality: Optional[Personality] = None + self.identity: Optional[Identity] = None + + @classmethod + def get_instance(cls) -> 'Individuality': + """获取Individuality单例实例 + + Returns: + Individuality: 单例实例 + """ + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def initialize(self, bot_nickname: str, personality_core: str, personality_sides: list, + identity_detail: list, height: int, weight: int, age: int, + gender: str, appearance: str) -> None: + """初始化个体特征 + + Args: + bot_nickname: 机器人昵称 + personality_core: 人格核心特点 + personality_sides: 人格侧面描述 + identity_detail: 身份细节描述 + height: 身高(厘米) + weight: 体重(千克) + age: 年龄 + gender: 性别 + appearance: 外貌特征 + """ + # 初始化人格 + self.personality = Personality.initialize( + bot_nickname=bot_nickname, + 
personality_core=personality_core, + personality_sides=personality_sides + ) + + # 初始化身份 + self.identity = Identity.initialize( + identity_detail=identity_detail, + height=height, + weight=weight, + age=age, + gender=gender, + appearance=appearance + ) + + def to_dict(self) -> dict: + """将个体特征转换为字典格式""" + return { + "personality": self.personality.to_dict() if self.personality else None, + "identity": self.identity.to_dict() if self.identity else None + } + + @classmethod + def from_dict(cls, data: dict) -> 'Individuality': + """从字典创建个体特征实例""" + instance = cls.get_instance() + if data.get("personality"): + instance.personality = Personality.from_dict(data["personality"]) + if data.get("identity"): + instance.identity = Identity.from_dict(data["identity"]) + return instance \ No newline at end of file diff --git a/src/personality/offline_llm.py b/src/individuality/offline_llm.py similarity index 100% rename from src/personality/offline_llm.py rename to src/individuality/offline_llm.py diff --git a/src/individuality/personality.py b/src/individuality/personality.py new file mode 100644 index 000000000..19aa3c212 --- /dev/null +++ b/src/individuality/personality.py @@ -0,0 +1,119 @@ +from dataclasses import dataclass +from typing import Dict, List +import os +import json +from pathlib import Path + +@dataclass +class Personality: + """人格特质类""" + openness: float # 开放性 + conscientiousness: float # 尽责性 + extraversion: float # 外向性 + agreeableness: float # 宜人性 + neuroticism: float # 神经质 + bot_nickname: str # 机器人昵称 + personality_core: str # 人格核心特点 + personality_sides: List[str] # 人格侧面描述 + + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self, personality_core: str = "", personality_sides: List[str] = None): + if personality_sides is None: + personality_sides = [] + self.personality_core = personality_core + self.personality_sides = personality_sides + + @classmethod + def get_instance(cls) -> 'Personality': + """获取Personality单例实例 + + Returns: + Personality: 单例实例 + """ + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def _init_big_five_personality(self): + """初始化大五人格特质""" + # 构建文件路径 + personality_file = Path("data/personality") / f"{self.bot_nickname}_personality.per" + + # 如果文件存在,读取文件 + if personality_file.exists(): + with open(personality_file, 'r', encoding='utf-8') as f: + personality_data = json.load(f) + self.openness = personality_data.get('openness', 0.5) + self.conscientiousness = personality_data.get('conscientiousness', 0.5) + self.extraversion = personality_data.get('extraversion', 0.5) + self.agreeableness = personality_data.get('agreeableness', 0.5) + self.neuroticism = personality_data.get('neuroticism', 0.5) + else: + # 如果文件不存在,根据personality_core和personality_core来设置大五人格特质 + if "活泼" in self.personality_core or "开朗" in self.personality_sides: + self.extraversion = 0.8 + self.neuroticism = 0.2 + else: + self.extraversion = 0.3 + self.neuroticism = 0.5 + + if "认真" in self.personality_core or "负责" in self.personality_sides: + self.conscientiousness = 0.9 + else: + self.conscientiousness = 0.5 + + if "友善" in self.personality_core or "温柔" in self.personality_sides: + self.agreeableness = 0.9 + else: + self.agreeableness = 0.5 + + if "创新" in self.personality_core or "开放" in self.personality_sides: + self.openness = 0.8 + else: + self.openness = 0.5 + + @classmethod + def initialize(cls, bot_nickname: str, personality_core: str, personality_sides: 
List[str]) -> 'Personality': + """初始化人格特质 + + Args: + bot_nickname: 机器人昵称 + personality_core: 人格核心特点 + personality_sides: 人格侧面描述 + + Returns: + Personality: 初始化后的人格特质实例 + """ + instance = cls.get_instance() + instance.bot_nickname = bot_nickname + instance.personality_core = personality_core + instance.personality_sides = personality_sides + instance._init_big_five_personality() + return instance + + def to_dict(self) -> Dict: + """将人格特质转换为字典格式""" + return { + "openness": self.openness, + "conscientiousness": self.conscientiousness, + "extraversion": self.extraversion, + "agreeableness": self.agreeableness, + "neuroticism": self.neuroticism, + "bot_nickname": self.bot_nickname, + "personality_core": self.personality_core, + "personality_sides": self.personality_sides + } + + @classmethod + def from_dict(cls, data: Dict) -> 'Personality': + """从字典创建人格特质实例""" + instance = cls.get_instance() + for key, value in data.items(): + setattr(instance, key, value) + return instance \ No newline at end of file diff --git a/src/plugins/personality_s/renqingziji_with_mymy.py b/src/individuality/renqingziji_with_mymy.py similarity index 100% rename from src/plugins/personality_s/renqingziji_with_mymy.py rename to src/individuality/renqingziji_with_mymy.py diff --git a/src/individuality/scene.py b/src/individuality/scene.py new file mode 100644 index 000000000..b94d55046 --- /dev/null +++ b/src/individuality/scene.py @@ -0,0 +1,40 @@ +import json +from typing import Dict +import os + +def load_scenes() -> Dict: + """ + 从JSON文件加载场景数据 + + Returns: + Dict: 包含所有场景的字典 + """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + json_path = os.path.join(current_dir, 'template_scene.json') + + with open(json_path, 'r', encoding='utf-8') as f: + return json.load(f) + +PERSONALITY_SCENES = load_scenes() + +def get_scene_by_factor(factor: str) -> Dict: + """ + 根据人格因子获取对应的情景测试 + + Args: + factor (str): 人格因子名称 + + Returns: + Dict: 包含情景描述的字典 + """ + return PERSONALITY_SCENES.get(factor, None) + + +def get_all_scenes() -> Dict: + """ + 获取所有情景测试 + + Returns: + Dict: 所有情景测试的字典 + """ + return PERSONALITY_SCENES diff --git a/src/individuality/template_scene.json b/src/individuality/template_scene.json new file mode 100644 index 000000000..cd9ae4752 --- /dev/null +++ b/src/individuality/template_scene.json @@ -0,0 +1,112 @@ +{ + "外向性": { + "场景1": { + "scenario": "你刚刚搬到一个新的城市工作。今天是你入职的第一天,在公司的电梯里,一位同事微笑着和你打招呼:\n\n同事:「嗨!你是新来的同事吧?我是市场部的小林。」\n\n同事看起来很友善,还主动介绍说:「待会午饭时间,我们部门有几个人准备一起去楼下新开的餐厅,你要一起来吗?可以认识一下其他同事。」", + "explanation": "这个场景通过职场社交情境,观察个体对于新环境、新社交圈的态度和反应倾向。" + }, + "场景2": { + "scenario": "在大学班级群里,班长发起了一个组织班级联谊活动的投票:\n\n班长:「大家好!下周末我们准备举办一次班级联谊活动,地点在学校附近的KTV。想请大家报名参加,也欢迎大家邀请其他班级的同学!」\n\n已经有几个同学在群里积极响应,有人@你问你要不要一起参加。", + "explanation": "通过班级活动场景,观察个体对群体社交活动的参与意愿。" + }, + "场景3": { + "scenario": "你在社交平台上发布了一条动态,收到了很多陌生网友的评论和私信:\n\n网友A:「你说的这个观点很有意思!想和你多交流一下。」\n\n网友B:「我也对这个话题很感兴趣,要不要建个群一起讨论?」", + "explanation": "通过网络社交场景,观察个体对线上社交的态度。" + }, + "场景4": { + "scenario": "你暗恋的对象今天主动来找你:\n\n对方:「那个...我最近在准备一个演讲比赛,听说你口才很好。能不能请你帮我看看演讲稿,顺便给我一些建议?如果你有时间的话,可以一起吃个饭聊聊。」", + "explanation": "通过恋爱情境,观察个体在面对心仪对象时的社交表现。" + }, + "场景5": { + "scenario": "在一次线下读书会上,主持人突然点名让你分享读后感:\n\n主持人:「听说你对这本书很有见解,能不能和大家分享一下你的想法?」\n\n现场有二十多个陌生的读书爱好者,都期待地看着你。", + "explanation": "通过即兴发言场景,观察个体的社交表现欲和公众表达能力。" + } + }, + "神经质": { + "场景1": { + "scenario": "你正在准备一个重要的项目演示,这关系到你的晋升机会。就在演示前30分钟,你收到了主管发来的消息:\n\n主管:「临时有个变动,CEO也会来听你的演示。他对这个项目特别感兴趣。」\n\n正当你准备回复时,主管又发来一条:「对了,能不能把演示时间压缩到15分钟?CEO下午还有其他安排。你之前准备的是30分钟的版本对吧?」", + "explanation": 
"这个场景通过突发的压力情境,观察个体在面对计划外变化时的情绪反应和调节能力。" + }, + "场景2": { + "scenario": "期末考试前一天晚上,你收到了好朋友发来的消息:\n\n好朋友:「不好意思这么晚打扰你...我看你平时成绩很好,能不能帮我解答几个问题?我真的很担心明天的考试。」\n\n你看了看时间,已经是晚上11点,而你原本计划的复习还没完成。", + "explanation": "通过考试压力场景,观察个体在时间紧张时的情绪管理。" + }, + "场景3": { + "scenario": "你在社交媒体上发表的一个观点引发了争议,有不少人开始批评你:\n\n网友A:「这种观点也好意思说出来,真是无知。」\n\n网友B:「建议楼主先去补补课再来发言。」\n\n评论区里的负面评论越来越多,还有人开始人身攻击。", + "explanation": "通过网络争议场景,观察个体面对批评时的心理承受能力。" + }, + "场景4": { + "scenario": "你和恋人约好今天一起看电影,但在约定时间前半小时,对方发来消息:\n\n恋人:「对不起,我临时有点事,可能要迟到一会儿。」\n\n二十分钟后,对方又发来消息:「可能要再等等,抱歉!」\n\n电影快要开始了,但对方还是没有出现。", + "explanation": "通过恋爱情境,观察个体对不确定性的忍耐程度。" + }, + "场景5": { + "scenario": "在一次重要的小组展示中,你的组员在演示途中突然卡壳了:\n\n组员小声对你说:「我忘词了,接下来的部分是什么来着...」\n\n台下的老师和同学都在等待,气氛有些尴尬。", + "explanation": "通过公开场合的突发状况,观察个体的应急反应和压力处理能力。" + } + }, + "严谨性": { + "场景1": { + "scenario": "你是团队的项目负责人,刚刚接手了一个为期两个月的重要项目。在第一次团队会议上:\n\n小王:「老大,我觉得两个月时间很充裕,我们先做着看吧,遇到问题再解决。」\n\n小张:「要不要先列个时间表?不过感觉太详细的计划也没必要,点到为止就行。」\n\n小李:「客户那边说如果能提前完成有奖励,我觉得我们可以先做快一点的部分。」", + "explanation": "这个场景通过项目管理情境,体现个体在工作方法、计划性和责任心方面的特征。" + }, + "场景2": { + "scenario": "期末小组作业,组长让大家分工完成一份研究报告。在截止日期前三天:\n\n组员A:「我的部分大概写完了,感觉还行。」\n\n组员B:「我这边可能还要一天才能完成,最近太忙了。」\n\n组员C发来一份没有任何引用出处、可能存在抄袭的内容:「我写完了,你们看看怎么样?」", + "explanation": "通过学习场景,观察个体对学术规范和质量要求的重视程度。" + }, + "场景3": { + "scenario": "你在一个兴趣小组的群聊中,大家正在讨论举办一次线下活动:\n\n成员A:「到时候见面就知道具体怎么玩了!」\n\n成员B:「对啊,随意一点挺好的。」\n\n成员C:「人来了自然就热闹了。」", + "explanation": "通过活动组织场景,观察个体对活动计划的态度。" + }, + "场景4": { + "scenario": "你和恋人计划一起去旅游,对方说:\n\n恋人:「我们就随心而行吧!订个目的地,其他的到了再说,这样更有意思。」\n\n距离出发还有一周时间,但机票、住宿和具体行程都还没有确定。", + "explanation": "通过旅行规划场景,观察个体的计划性和对不确定性的接受程度。" + }, + "场景5": { + "scenario": "在一个重要的团队项目中,你发现一个同事的工作存在明显错误:\n\n同事:「差不多就行了,反正领导也看不出来。」\n\n这个错误可能不会立即造成问题,但长期来看可能会影响项目质量。", + "explanation": "通过工作质量场景,观察个体对细节和标准的坚持程度。" + } + }, + "开放性": { + "场景1": { + "scenario": "周末下午,你的好友小美兴致勃勃地给你打电话:\n\n小美:「我刚发现一个特别有意思的沉浸式艺术展!不是传统那种挂画的展览,而是把整个空间都变成了艺术品。观众要穿特制的服装,还要带上VR眼镜,好像还有AI实时互动!」\n\n小美继续说:「虽然票价不便宜,但听说体验很独特。网上评价两极分化,有人说是前所未有的艺术革新,也有人说是哗众取宠。要不要周末一起去体验一下?」", + "explanation": "这个场景通过新型艺术体验,反映个体对创新事物的接受程度和尝试意愿。" + }, + "场景2": { + "scenario": "在一节创意写作课上,老师提出了一个特别的作业:\n\n老师:「下周的作业是用AI写作工具协助创作一篇小说。你们可以自由探索如何与AI合作,打破传统写作方式。」\n\n班上随即展开了激烈讨论,有人认为这是对创作的亵渎,也有人对这种新形式感到兴奋。", + "explanation": "通过新技术应用场景,观察个体对创新学习方式的态度。" + }, + "场景3": { + "scenario": "在社交媒体上,你看到一个朋友分享了一种新的生活方式:\n\n「最近我在尝试'数字游牧'生活,就是一边远程工作一边环游世界。没有固定住所,住青旅或短租,认识来自世界各地的朋友。虽然有时会很不稳定,但这种自由的生活方式真的很棒!」\n\n评论区里争论不断,有人向往这种生活,也有人觉得太冒险。", + "explanation": "通过另类生活方式,观察个体对非传统选择的态度。" + }, + "场景4": { + "scenario": "你的恋人突然提出了一个想法:\n\n恋人:「我们要不要尝试一下开放式关系?就是在保持彼此关系的同时,也允许和其他人发展感情。现在国外很多年轻人都这样。」\n\n这个提议让你感到意外,你之前从未考虑过这种可能性。", + "explanation": "通过感情观念场景,观察个体对非传统关系模式的接受度。" + }, + "场景5": { + "scenario": "在一次朋友聚会上,大家正在讨论未来职业规划:\n\n朋友A:「我准备辞职去做自媒体,专门介绍一些小众的文化和艺术。」\n\n朋友B:「我想去学习生物科技,准备转行做人造肉研发。」\n\n朋友C:「我在考虑加入一个区块链创业项目,虽然风险很大。」", + "explanation": "通过职业选择场景,观察个体对新兴领域的探索意愿。" + } + }, + "宜人性": { + "场景1": { + "scenario": "在回家的公交车上,你遇到这样一幕:\n\n一位老奶奶颤颤巍巍地上了车,车上座位已经坐满了。她站在你旁边,看起来很疲惫。这时你听到前排两个年轻人的对话:\n\n年轻人A:「那个老太太好像站不稳,看起来挺累的。」\n\n年轻人B:「现在的老年人真是...我看她包里还有菜,肯定是去菜市场买完菜回来的,这么多人都不知道叫子女开车接送。」\n\n就在这时,老奶奶一个趔趄,差点摔倒。她扶住了扶手,但包里的东西洒了一些出来。", + "explanation": "这个场景通过公共场合的助人情境,体现个体的同理心和对他人需求的关注程度。" + }, + "场景2": { + "scenario": "在班级群里,有同学发起为生病住院的同学捐款:\n\n同学A:「大家好,小林最近得了重病住院,医药费很贵,家里负担很重。我们要不要一起帮帮他?」\n\n同学B:「我觉得这是他家里的事,我们不方便参与吧。」\n\n同学C:「但是都是同学一场,帮帮忙也是应该的。」", + "explanation": "通过同学互助场景,观察个体的助人意愿和同理心。" + }, + "场景3": { + "scenario": "在一个网络讨论组里,有人发布了求助信息:\n\n求助者:「最近心情很低落,感觉生活很压抑,不知道该怎么办...」\n\n评论区里已经有一些回复:\n「生活本来就是这样,想开点!」\n「你这样子太消极了,要积极面对。」\n「谁还没点烦心事啊,过段时间就好了。」", + 
"explanation": "通过网络互助场景,观察个体的共情能力和安慰方式。" + }, + "场景4": { + "scenario": "你的恋人向你倾诉工作压力:\n\n恋人:「最近工作真的好累,感觉快坚持不下去了...」\n\n但今天你也遇到了很多烦心事,心情也不太好。", + "explanation": "通过感情关系场景,观察个体在自身状态不佳时的关怀能力。" + }, + "场景5": { + "scenario": "在一次团队项目中,新来的同事小王因为经验不足,造成了一个严重的错误。在部门会议上:\n\n主管:「这个错误造成了很大的损失,是谁负责的这部分?」\n\n小王看起来很紧张,欲言又止。你知道是他造成的错误,同时你也是这个项目的共同负责人。", + "explanation": "通过职场情境,观察个体在面对他人过错时的态度和处理方式。" + } + } +} \ No newline at end of file diff --git a/src/main.py b/src/main.py index 14dc04355..3e657204f 100644 --- a/src/main.py +++ b/src/main.py @@ -15,6 +15,7 @@ from .plugins.config.config import global_config from .plugins.chat.bot import chat_bot from .common.logger import get_module_logger from .plugins.remote import heartbeat_thread # noqa: F401 +from .individuality.individuality import Individuality logger = get_module_logger("main") @@ -26,6 +27,7 @@ class MainSystem: self.mood_manager = MoodManager.get_instance() self.hippocampus_manager = HippocampusManager.get_instance() self._message_manager_started = False + self.individuality = Individuality.get_instance() # 使用消息API替代直接的FastAPI实例 from .plugins.message import global_api @@ -79,7 +81,7 @@ class MainSystem: # 初始化日程 bot_schedule.initialize( name=global_config.BOT_NICKNAME, - personality=global_config.PROMPT_PERSONALITY, + personality=global_config.personality_core, behavior=global_config.PROMPT_SCHEDULE_GEN, interval=global_config.SCHEDULE_DOING_UPDATE_INTERVAL, ) @@ -88,6 +90,20 @@ class MainSystem: # 启动FastAPI服务器 self.app.register_message_handler(chat_bot.message_process) + # 初始化个体特征 + self.individuality.initialize( + bot_nickname=global_config.BOT_NICKNAME, + personality_core=global_config.personality_core, + personality_sides=global_config.personality_sides, + identity_detail=global_config.identity_detail, + height=global_config.height, + weight=global_config.weight, + age=global_config.age, + gender=global_config.gender, + appearance=global_config.appearance + ) + logger.success("个体特征初始化成功") + try: # 启动心流系统 asyncio.create_task(heartflow.heartflow_start_working()) diff --git a/src/personality/personality.py b/src/personality/personality.py deleted file mode 100644 index 3977743a5..000000000 --- a/src/personality/personality.py +++ /dev/null @@ -1,32 +0,0 @@ -from dataclasses import dataclass -from typing import Dict, List - -@dataclass -class Personality: - """人格特质类""" - openness: float # 开放性 - conscientiousness: float # 尽责性 - extraversion: float # 外向性 - agreeableness: float # 宜人性 - neuroticism: float # 神经质 - bot_nickname: str # 机器人昵称 - personality_core: str # 人格核心特点 - personality_detail: List[str] # 人格细节描述 - - def to_dict(self) -> Dict: - """将人格特质转换为字典格式""" - return { - "openness": self.openness, - "conscientiousness": self.conscientiousness, - "extraversion": self.extraversion, - "agreeableness": self.agreeableness, - "neuroticism": self.neuroticism, - "bot_nickname": self.bot_nickname, - "personality_core": self.personality_core, - "personality_detail": self.personality_detail - } - - @classmethod - def from_dict(cls, data: Dict) -> 'Personality': - """从字典创建人格特质实例""" - return cls(**data) \ No newline at end of file diff --git a/src/personality/personality_gen.py b/src/personality/personality_gen.py deleted file mode 100644 index 8eaf99db0..000000000 --- a/src/personality/personality_gen.py +++ /dev/null @@ -1,186 +0,0 @@ -import os -import json -import sys -from typing import Optional, List - -sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) -from src.personality.offline_llm import LLM_request_off -from 
src.common.logger import get_module_logger -from src.personality.personality import Personality - -logger = get_module_logger("personality_gen") - -class PersonalityGenerator: - """人格生成器类""" - def __init__(self, bot_nickname: str): - self.bot_nickname = bot_nickname - self.llm = LLM_request_off() - self.personality: Optional[Personality] = None - self.save_path = os.path.join("data", "personality") - - # 确保保存目录存在 - os.makedirs(self.save_path, exist_ok=True) - - def personality_exists(self) -> bool: - """检查是否已存在该机器人的人格文件""" - file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") - return os.path.exists(file_path) - - async def generate_personality( - self, - personality_core: str, - personality_detail: List[str], - height: int, - weight: int, - age: int, - gender: str, - appearance: str, - interests: List[str], - others: List[str] - ) -> Optional[Personality]: - """根据配置生成人格特质""" - # 检查是否已存在 - if self.personality_exists(): - logger.info(f"机器人 {self.bot_nickname} 的人格文件已存在,跳过生成") - return await self.load_personality() - - # 构建提示文本 - prompt = f"""你是一个心理学家,专职心理测量和大五人格研究。请根据以下信息分析并给出这个人的大五人格特质评分。 -每个特质的分数范围是0到1之间的小数,请确保返回标准的JSON格式。 - -机器人信息: -- 昵称:{self.bot_nickname} -- 性格核心的特质:{personality_core} -- 性格细节:{', '.join(personality_detail)} -- 身高:{height}cm -- 体重:{weight}kg -- 年龄:{age}岁 -- 性别:{gender} -- 外貌:{appearance} -- 兴趣爱好:{', '.join(interests)} -- 其他信息:{', '.join(others)} -请只返回如下JSON格式数据(不要包含任何其他文字): -{{ - "openness": 0.x, - "conscientiousness": 0.x, - "extraversion": 0.x, - "agreeableness": 0.x, - "neuroticism": 0.x -}}""" - - response, _ = await self.llm.generate_response_async(prompt) - try: - # 尝试清理响应文本,只保留JSON部分 - json_str = response.strip() - if "```json" in json_str: - json_str = json_str.split("```json")[1].split("```")[0].strip() - elif "```" in json_str: - json_str = json_str.split("```")[1].strip() - - traits = json.loads(json_str) - - # 验证所有必需的字段是否存在 - required_fields = ["openness", "conscientiousness", "extraversion", "agreeableness", "neuroticism"] - if not all(field in traits for field in required_fields): - raise ValueError("缺少必需的人格特质字段") - - # 验证数值是否在合理范围内 - for field in required_fields: - if not 0 <= traits[field] <= 1: - traits[field] = max(0, min(traits[field], 1)) - - self.personality = Personality( - **traits, - bot_nickname=self.bot_nickname - ) - await self.save_personality() - return self.personality - - except json.JSONDecodeError as e: - logger.error(f"JSON解析失败: {e}\n响应内容: {response}") - raise - except Exception as e: - logger.error(f"生成人格特质失败: {e}") - raise - - async def save_personality(self) -> None: - """保存人格特质到文件""" - if not self.personality: - raise ValueError("没有可保存的人格特质") - - file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") - try: - with open(file_path, 'w', encoding='utf-8') as f: - json.dump(self.personality.to_dict(), f, ensure_ascii=False, indent=4) - logger.info(f"人格特质已保存到: {file_path}") - except Exception as e: - logger.error(f"保存人格特质失败: {e}") - raise - - async def load_personality(self) -> Optional[Personality]: - """从文件加载人格特质""" - file_path = os.path.join(self.save_path, f"{self.bot_nickname}_personality.per") - try: - if os.path.exists(file_path): - with open(file_path, 'r', encoding='utf-8') as f: - data = json.load(f) - self.personality = Personality.from_dict(data) - return self.personality - except Exception as e: - logger.error(f"加载人格特质失败: {e}") - return None - -async def main(): - """主函数,用于测试人格生成""" - # 创建人格生成器实例 - generator = PersonalityGenerator("麦麦") - - # 生成或加载人格 - personality = await 
generator.generate_personality( - personality_core="对世界抱着善意和好奇,愿意尝试新奇事物", - personality_detail=[ - "你会刷小红书", - "你会刷贴吧", - "学习心理学和脑科学", - "你会刷b站,对ACG文化感兴趣", - "有时候有些搞怪", - ], - height=160, - weight=45, - age=20, - gender="女", - appearance="有着橙色短发", - interests=["摄影", "绘画"], - others=["是一个大二女大学生"] - ) - - if personality: - logger.info("人格特质生成成功:") - logger.info(f"开放性: {personality.openness}") - logger.info(f"尽责性: {personality.conscientiousness}") - logger.info(f"外向性: {personality.extraversion}") - logger.info(f"宜人性: {personality.agreeableness}") - logger.info(f"神经质: {personality.neuroticism}") - else: - logger.error("人格特质生成失败") - -if __name__ == "__main__": - import asyncio - import platform - - if platform.system() == 'Windows': - # Windows平台特殊处理 - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - - try: - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) - finally: - # 确保所有待处理的任务都完成 - pending = asyncio.all_tasks(loop) - for task in pending: - task.cancel() - - # 运行一次以处理取消的任务 - loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True)) - loop.close() diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py index 4c0f035ea..176f59b43 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py @@ -1,14 +1,13 @@ import random import time from typing import Optional, Union -import re -import jieba import numpy as np from ....common.database import db from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker from ...chat.chat_stream import chat_manager from ...moods.moods import MoodManager +from ....individuality.individuality import Individuality from ...memory_system.Hippocampus import HippocampusManager from ...schedule.schedule_generator import bot_schedule from ...config.config import global_config @@ -28,7 +27,23 @@ class PromptBuilder: ) -> tuple[str, str]: # 开始构建prompt - + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" + + + # 关系 who_chat_in_group = [(chat_stream.user_info.platform, chat_stream.user_info.user_id, @@ -105,20 +120,6 @@ class PromptBuilder: ) keywords_reaction_prompt += rule.get("reaction", "") + "," - # 人格选择 - personality = global_config.PROMPT_PERSONALITY - probability_1 = global_config.PERSONALITY_1 - probability_2 = global_config.PERSONALITY_2 - - personality_choice = random.random() - - if personality_choice < probability_1: # 第一种风格 - prompt_personality = personality[0] - elif personality_choice < probability_1 + probability_2: # 第二种风格 - prompt_personality = personality[1] - else: # 第三种人格 - prompt_personality = personality[2] - # 中文高手(新加的好玩功能) prompt_ger = "" if random.random() < 0.04: diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py index d79878258..b5b01bb7b 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py +++ 
b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py @@ -10,7 +10,7 @@ from ...chat.utils import get_recent_group_detailed_plain_text, get_recent_group from ...chat.chat_stream import chat_manager from src.common.logger import get_module_logger from ...person_info.relationship_manager import relationship_manager - +from ....individuality.individuality import Individuality from src.heart_flow.heartflow import heartflow logger = get_module_logger("prompt") @@ -28,6 +28,20 @@ class PromptBuilder: current_mind_info = heartflow.get_subheartflow(stream_id).current_mind # 开始构建prompt + prompt_personality = "你" + #person + individuality = Individuality.get_instance() + + personality_core = individuality.personality.personality_core + prompt_personality += personality_core + + personality_sides = individuality.personality.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + + identity_detail = individuality.identity.identity_detail + random.shuffle(identity_detail) + prompt_personality += f",{identity_detail[0]}" # 关系 who_chat_in_group = [(chat_stream.user_info.platform, @@ -90,20 +104,6 @@ class PromptBuilder: ) keywords_reaction_prompt += rule.get("reaction", "") + "," - # 人格选择 - personality = global_config.PROMPT_PERSONALITY - probability_1 = global_config.PERSONALITY_1 - probability_2 = global_config.PERSONALITY_2 - - personality_choice = random.random() - - if personality_choice < probability_1: # 第一种风格 - prompt_personality = personality[0] - elif personality_choice < probability_1 + probability_2: # 第二种风格 - prompt_personality = personality[1] - else: # 第三种人格 - prompt_personality = personality[2] - # 中文高手(新加的好玩功能) prompt_ger = "" if random.random() < 0.04: @@ -133,73 +133,5 @@ class PromptBuilder: return prompt - def _build_initiative_prompt_select(self, group_id, probability_1=0.8, probability_2=0.1): - current_date = time.strftime("%Y-%m-%d", time.localtime()) - current_time = time.strftime("%H:%M:%S", time.localtime()) - bot_schedule_now_time, bot_schedule_now_activity = bot_schedule.get_current_task() - prompt_date = f"""今天是{current_date},现在是{current_time},你今天的日程是: -{bot_schedule.today_schedule} -你现在正在{bot_schedule_now_activity} -""" - - chat_talking_prompt = "" - if group_id: - chat_talking_prompt = get_recent_group_detailed_plain_text( - group_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True - ) - - chat_talking_prompt = f"以下是群里正在聊天的内容:\n{chat_talking_prompt}" - # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}") - - # 获取主动发言的话题 - all_nodes = HippocampusManager.get_instance().memory_graph.dots - all_nodes = filter(lambda dot: len(dot[1]["memory_items"]) > 3, all_nodes) - nodes_for_select = random.sample(all_nodes, 5) - topics = [info[0] for info in nodes_for_select] - - # 激活prompt构建 - activate_prompt = "" - activate_prompt = "以上是群里正在进行的聊天。" - personality = global_config.PROMPT_PERSONALITY - prompt_personality = "" - personality_choice = random.random() - if personality_choice < probability_1: # 第一种人格 - prompt_personality = f"""{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[0]}""" - elif personality_choice < probability_1 + probability_2: # 第二种人格 - prompt_personality = f"""{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[1]}""" - else: # 第三种人格 - prompt_personality = f"""{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[2]}""" - - topics_str = ",".join(f'"{topics}"') - prompt_for_select = ( - 
f"你现在想在群里发言,回忆了一下,想到几个话题,分别是{topics_str},综合当前状态以及群内气氛," - f"请你在其中选择一个合适的话题,注意只需要输出话题,除了话题什么也不要输出(双引号也不要输出)" - ) - - prompt_initiative_select = f"{prompt_date}\n{prompt_personality}\n{prompt_for_select}" - prompt_regular = f"{prompt_date}\n{prompt_personality}" - - return prompt_initiative_select, nodes_for_select, prompt_regular - - def _build_initiative_prompt_check(self, selected_node, prompt_regular): - memory = random.sample(selected_node["memory_items"], 3) - memory = "\n".join(memory) - prompt_for_check = ( - f"{prompt_regular}你现在想在群里发言,回忆了一下,想到一个话题,是{selected_node['concept']}," - f"关于这个话题的记忆有\n{memory}\n,以这个作为主题发言合适吗?请在把握群里的聊天内容的基础上," - f"综合群内的氛围,如果认为应该发言请输出yes,否则输出no,请注意是决定是否需要发言,而不是编写回复内容," - f"除了yes和no不要输出任何回复内容。" - ) - return prompt_for_check, memory - - def _build_initiative_prompt(self, selected_node, prompt_regular, memory): - prompt_for_initiative = ( - f"{prompt_regular}你现在想在群里发言,回忆了一下,想到一个话题,是{selected_node['concept']}," - f"关于这个话题的记忆有\n{memory}\n,请在把握群里的聊天内容的基础上,综合群内的氛围," - f"以日常且口语化的口吻,简短且随意一点进行发言,不要说的太有条理,可以有个性。" - f"记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情,@等)" - ) - return prompt_for_initiative - prompt_builder = PromptBuilder() diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py index 83314c082..125439374 100644 --- a/src/plugins/config/config.py +++ b/src/plugins/config/config.py @@ -149,20 +149,11 @@ class BotConfig: # personality personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内,谁再写3000字小作文敲谁脑袋 - personality_detail: List[str] = field(default_factory=lambda: [ - "用一句话或几句话描述人格的一些细节", - "用一句话或几句话描述人格的一些细节", - "用一句话或几句话描述人格的一些细节", - "用一句话或几句话描述人格的一些细节", - "用一句话或几句话描述人格的一些细节" + personality_sides: List[str] = field(default_factory=lambda: [ + "用一句话或几句话描述人格的一些侧面", + "用一句话或几句话描述人格的一些侧面", + "用一句话或几句话描述人格的一些侧面" ]) - - traits: List[str] = field(default_factory=lambda: [ - "用一个词描述性格", - "用一个词描述性格", - "用一个词描述性格", - ]) - # identity identity_detail: List[str] = field(default_factory=lambda: [ "身份特点", @@ -173,11 +164,6 @@ class BotConfig: age: int = 20 # 年龄 单位岁 gender: str = "男" # 性别 appearance: str = "用几句话描述外貌特征" # 外貌特征 - interests: List[str] = field(default_factory=lambda: [ - "兴趣爱好1", - "兴趣爱好2", - "兴趣爱好3" - ]) # schedule ENABLE_SCHEDULE_GEN: bool = False # 是否启用日程生成 @@ -371,7 +357,7 @@ class BotConfig: personality_config = parent["personality"] if config.INNER_VERSION in SpecifierSet(">=1.2.4"): config.personality_core = personality_config.get("personality_core", config.personality_core) - config.personality_detail = personality_config.get("personality_detail", config.personality_detail) + config.personality_sides = personality_config.get("personality_sides", config.personality_sides) def identity(parent: dict): identity_config = parent["identity"] @@ -382,7 +368,6 @@ class BotConfig: config.age = identity_config.get("age", config.age) config.gender = identity_config.get("gender", config.gender) config.appearance = identity_config.get("appearance", config.appearance) - config.interests = identity_config.get("interests", config.interests) def schedule(parent: dict): schedule_config = parent["schedule"] diff --git a/src/plugins/personality_s/scene.py b/src/plugins/personality_s/scene.py deleted file mode 100644 index 0ce094a36..000000000 --- a/src/plugins/personality_s/scene.py +++ /dev/null @@ -1,261 +0,0 @@ -from typing import Dict - -PERSONALITY_SCENES = { - "外向性": { - "场景1": { - "scenario": """你刚刚搬到一个新的城市工作。今天是你入职的第一天,在公司的电梯里,一位同事微笑着和你打招呼: - -同事:「嗨!你是新来的同事吧?我是市场部的小林。」 - -同事看起来很友善,还主动介绍说:「待会午饭时间,我们部门有几个人准备一起去楼下新开的餐厅,你要一起来吗?可以认识一下其他同事。」""", - "explanation": 
"这个场景通过职场社交情境,观察个体对于新环境、新社交圈的态度和反应倾向。", - }, - "场景2": { - "scenario": """在大学班级群里,班长发起了一个组织班级联谊活动的投票: - -班长:「大家好!下周末我们准备举办一次班级联谊活动,地点在学校附近的KTV。想请大家报名参加,也欢迎大家邀请其他班级的同学!」 - -已经有几个同学在群里积极响应,有人@你问你要不要一起参加。""", - "explanation": "通过班级活动场景,观察个体对群体社交活动的参与意愿。", - }, - "场景3": { - "scenario": """你在社交平台上发布了一条动态,收到了很多陌生网友的评论和私信: - -网友A:「你说的这个观点很有意思!想和你多交流一下。」 - -网友B:「我也对这个话题很感兴趣,要不要建个群一起讨论?」""", - "explanation": "通过网络社交场景,观察个体对线上社交的态度。", - }, - "场景4": { - "scenario": """你暗恋的对象今天主动来找你: - -对方:「那个...我最近在准备一个演讲比赛,听说你口才很好。能不能请你帮我看看演讲稿,顺便给我一些建议?""" - """如果你有时间的话,可以一起吃个饭聊聊。」""", - "explanation": "通过恋爱情境,观察个体在面对心仪对象时的社交表现。", - }, - "场景5": { - "scenario": """在一次线下读书会上,主持人突然点名让你分享读后感: - -主持人:「听说你对这本书很有见解,能不能和大家分享一下你的想法?」 - -现场有二十多个陌生的读书爱好者,都期待地看着你。""", - "explanation": "通过即兴发言场景,观察个体的社交表现欲和公众表达能力。", - }, - }, - "神经质": { - "场景1": { - "scenario": """你正在准备一个重要的项目演示,这关系到你的晋升机会。""" - """就在演示前30分钟,你收到了主管发来的消息: - -主管:「临时有个变动,CEO也会来听你的演示。他对这个项目特别感兴趣。」 - -正当你准备回复时,主管又发来一条:「对了,能不能把演示时间压缩到15分钟?CEO下午还有其他安排。你之前准备的是30分钟的版本对吧?」""", - "explanation": "这个场景通过突发的压力情境,观察个体在面对计划外变化时的情绪反应和调节能力。", - }, - "场景2": { - "scenario": """期末考试前一天晚上,你收到了好朋友发来的消息: - -好朋友:「不好意思这么晚打扰你...我看你平时成绩很好,能不能帮我解答几个问题?我真的很担心明天的考试。」 - -你看了看时间,已经是晚上11点,而你原本计划的复习还没完成。""", - "explanation": "通过考试压力场景,观察个体在时间紧张时的情绪管理。", - }, - "场景3": { - "scenario": """你在社交媒体上发表的一个观点引发了争议,有不少人开始批评你: - -网友A:「这种观点也好意思说出来,真是无知。」 - -网友B:「建议楼主先去补补课再来发言。」 - -评论区里的负面评论越来越多,还有人开始人身攻击。""", - "explanation": "通过网络争议场景,观察个体面对批评时的心理承受能力。", - }, - "场景4": { - "scenario": """你和恋人约好今天一起看电影,但在约定时间前半小时,对方发来消息: - -恋人:「对不起,我临时有点事,可能要迟到一会儿。」 - -二十分钟后,对方又发来消息:「可能要再等等,抱歉!」 - -电影快要开始了,但对方还是没有出现。""", - "explanation": "通过恋爱情境,观察个体对不确定性的忍耐程度。", - }, - "场景5": { - "scenario": """在一次重要的小组展示中,你的组员在演示途中突然卡壳了: - -组员小声对你说:「我忘词了,接下来的部分是什么来着...」 - -台下的老师和同学都在等待,气氛有些尴尬。""", - "explanation": "通过公开场合的突发状况,观察个体的应急反应和压力处理能力。", - }, - }, - "严谨性": { - "场景1": { - "scenario": """你是团队的项目负责人,刚刚接手了一个为期两个月的重要项目。在第一次团队会议上: - -小王:「老大,我觉得两个月时间很充裕,我们先做着看吧,遇到问题再解决。」 - -小张:「要不要先列个时间表?不过感觉太详细的计划也没必要,点到为止就行。」 - -小李:「客户那边说如果能提前完成有奖励,我觉得我们可以先做快一点的部分。」""", - "explanation": "这个场景通过项目管理情境,体现个体在工作方法、计划性和责任心方面的特征。", - }, - "场景2": { - "scenario": """期末小组作业,组长让大家分工完成一份研究报告。在截止日期前三天: - -组员A:「我的部分大概写完了,感觉还行。」 - -组员B:「我这边可能还要一天才能完成,最近太忙了。」 - -组员C发来一份没有任何引用出处、可能存在抄袭的内容:「我写完了,你们看看怎么样?」""", - "explanation": "通过学习场景,观察个体对学术规范和质量要求的重视程度。", - }, - "场景3": { - "scenario": """你在一个兴趣小组的群聊中,大家正在讨论举办一次线下活动: - -成员A:「到时候见面就知道具体怎么玩了!」 - -成员B:「对啊,随意一点挺好的。」 - -成员C:「人来了自然就热闹了。」""", - "explanation": "通过活动组织场景,观察个体对活动计划的态度。", - }, - "场景4": { - "scenario": """你和恋人计划一起去旅游,对方说: - -恋人:「我们就随心而行吧!订个目的地,其他的到了再说,这样更有意思。」 - -距离出发还有一周时间,但机票、住宿和具体行程都还没有确定。""", - "explanation": "通过旅行规划场景,观察个体的计划性和对不确定性的接受程度。", - }, - "场景5": { - "scenario": """在一个重要的团队项目中,你发现一个同事的工作存在明显错误: - -同事:「差不多就行了,反正领导也看不出来。」 - -这个错误可能不会立即造成问题,但长期来看可能会影响项目质量。""", - "explanation": "通过工作质量场景,观察个体对细节和标准的坚持程度。", - }, - }, - "开放性": { - "场景1": { - "scenario": """周末下午,你的好友小美兴致勃勃地给你打电话: - -小美:「我刚发现一个特别有意思的沉浸式艺术展!不是传统那种挂画的展览,而是把整个空间都变成了艺术品。""" - """观众要穿特制的服装,还要带上VR眼镜,好像还有AI实时互动!」 - -小美继续说:「虽然票价不便宜,但听说体验很独特。网上评价两极分化,有人说是前所未有的艺术革新,也有人说是哗众取宠。""" - """要不要周末一起去体验一下?」""", - "explanation": "这个场景通过新型艺术体验,反映个体对创新事物的接受程度和尝试意愿。", - }, - "场景2": { - "scenario": """在一节创意写作课上,老师提出了一个特别的作业: - -老师:「下周的作业是用AI写作工具协助创作一篇小说。你们可以自由探索如何与AI合作,打破传统写作方式。」 - -班上随即展开了激烈讨论,有人认为这是对创作的亵渎,也有人对这种新形式感到兴奋。""", - "explanation": "通过新技术应用场景,观察个体对创新学习方式的态度。", - }, - "场景3": { - "scenario": """在社交媒体上,你看到一个朋友分享了一种新的生活方式: - -「最近我在尝试'数字游牧'生活,就是一边远程工作一边环游世界。""" - """没有固定住所,住青旅或短租,认识来自世界各地的朋友。虽然有时会很不稳定,但这种自由的生活方式真的很棒!」 - -评论区里争论不断,有人向往这种生活,也有人觉得太冒险。""", - 
"explanation": "通过另类生活方式,观察个体对非传统选择的态度。", - }, - "场景4": { - "scenario": """你的恋人突然提出了一个想法: - -恋人:「我们要不要尝试一下开放式关系?就是在保持彼此关系的同时,也允许和其他人发展感情。现在国外很多年轻人都这样。」 - -这个提议让你感到意外,你之前从未考虑过这种可能性。""", - "explanation": "通过感情观念场景,观察个体对非传统关系模式的接受度。", - }, - "场景5": { - "scenario": """在一次朋友聚会上,大家正在讨论未来职业规划: - -朋友A:「我准备辞职去做自媒体,专门介绍一些小众的文化和艺术。」 - -朋友B:「我想去学习生物科技,准备转行做人造肉研发。」 - -朋友C:「我在考虑加入一个区块链创业项目,虽然风险很大。」""", - "explanation": "通过职业选择场景,观察个体对新兴领域的探索意愿。", - }, - }, - "宜人性": { - "场景1": { - "scenario": """在回家的公交车上,你遇到这样一幕: - -一位老奶奶颤颤巍巍地上了车,车上座位已经坐满了。她站在你旁边,看起来很疲惫。这时你听到前排两个年轻人的对话: - -年轻人A:「那个老太太好像站不稳,看起来挺累的。」 - -年轻人B:「现在的老年人真是...我看她包里还有菜,肯定是去菜市场买完菜回来的,这么多人都不知道叫子女开车接送。」 - -就在这时,老奶奶一个趔趄,差点摔倒。她扶住了扶手,但包里的东西洒了一些出来。""", - "explanation": "这个场景通过公共场合的助人情境,体现个体的同理心和对他人需求的关注程度。", - }, - "场景2": { - "scenario": """在班级群里,有同学发起为生病住院的同学捐款: - -同学A:「大家好,小林最近得了重病住院,医药费很贵,家里负担很重。我们要不要一起帮帮他?」 - -同学B:「我觉得这是他家里的事,我们不方便参与吧。」 - -同学C:「但是都是同学一场,帮帮忙也是应该的。」""", - "explanation": "通过同学互助场景,观察个体的助人意愿和同理心。", - }, - "场景3": { - "scenario": """在一个网络讨论组里,有人发布了求助信息: - -求助者:「最近心情很低落,感觉生活很压抑,不知道该怎么办...」 - -评论区里已经有一些回复: -「生活本来就是这样,想开点!」 -「你这样子太消极了,要积极面对。」 -「谁还没点烦心事啊,过段时间就好了。」""", - "explanation": "通过网络互助场景,观察个体的共情能力和安慰方式。", - }, - "场景4": { - "scenario": """你的恋人向你倾诉工作压力: - -恋人:「最近工作真的好累,感觉快坚持不下去了...」 - -但今天你也遇到了很多烦心事,心情也不太好。""", - "explanation": "通过感情关系场景,观察个体在自身状态不佳时的关怀能力。", - }, - "场景5": { - "scenario": """在一次团队项目中,新来的同事小王因为经验不足,造成了一个严重的错误。在部门会议上: - -主管:「这个错误造成了很大的损失,是谁负责的这部分?」 - -小王看起来很紧张,欲言又止。你知道是他造成的错误,同时你也是这个项目的共同负责人。""", - "explanation": "通过职场情境,观察个体在面对他人过错时的态度和处理方式。", - }, - }, -} - - -def get_scene_by_factor(factor: str) -> Dict: - """ - 根据人格因子获取对应的情景测试 - - Args: - factor (str): 人格因子名称 - - Returns: - Dict: 包含情景描述的字典 - """ - return PERSONALITY_SCENES.get(factor, None) - - -def get_all_scenes() -> Dict: - """ - 获取所有情景测试 - - Returns: - Dict: 所有情景测试的字典 - """ - return PERSONALITY_SCENES diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py index edce54b64..e8999099b 100644 --- a/src/plugins/schedule/schedule_generator.py +++ b/src/plugins/schedule/schedule_generator.py @@ -62,9 +62,7 @@ class ScheduleGenerator: self.name = name self.behavior = behavior self.schedule_doing_update_interval = interval - - for pers in personality: - self.personality += pers + "\n" + self.personality = personality async def mai_schedule_start(self): """启动日程系统,每5分钟执行一次move_doing,并在日期变化时重新检查日程""" diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index c40c03dfd..9e107a5cd 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -35,7 +35,7 @@ ban_user_id = [] #禁止回复和读取消息的QQ号 [personality] personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内,谁再写3000字小作文敲谁脑袋 -personality_detail = [ +personality_sides = [ "用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节", diff --git a/从0.6.0升级0.6.1请先看我.txt b/从0.6.0升级0.6.1请先看我.txt new file mode 100644 index 000000000..734061c99 --- /dev/null +++ b/从0.6.0升级0.6.1请先看我.txt @@ -0,0 +1 @@ +该版本变动了人格相关设置,原有的配置内容可能被自动更新,如果你没有备份,可以在\config\old找回 \ No newline at end of file From 915ad8c61a80c23901a720ae8bf1c8d196ece34a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 14:22:19 +0800 Subject: [PATCH 40/51] =?UTF-8?q?fix=EF=BC=9A=E9=98=B2=E6=AD=A2=E5=88=9D?= =?UTF-8?q?=E6=AC=A1=E6=B3=A8=E5=86=8C=E8=A1=A8=E6=83=85=E5=8C=85=E6=97=B6?= =?UTF-8?q?=E6=BA=A2=E5=87=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 1 + src/plugins/chat/emoji_manager.py 
| 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/.gitignore b/.gitignore index d46fb033f..2bac2dac9 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ mongodb/ NapCat.Framework.Windows.Once/ log/ logs/ +MaiBot-Napcat-Adapter-main /test /src/test nonebot-maibot-adapter/ diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py index 6121124c5..6247bf405 100644 --- a/src/plugins/chat/emoji_manager.py +++ b/src/plugins/chat/emoji_manager.py @@ -249,7 +249,22 @@ class EmojiManager: f for f in os.listdir(emoji_dir) if f.lower().endswith((".jpg", ".jpeg", ".png", ".gif")) ] + # 检查当前表情包数量 + self._update_emoji_count() + if self.emoji_num >= self.emoji_num_max: + logger.warning(f"[警告] 表情包数量已达到上限({self.emoji_num}/{self.emoji_num_max}),跳过注册") + return + + # 计算还可以注册的数量 + remaining_slots = self.emoji_num_max - self.emoji_num + logger.info(f"[注册] 还可以注册 {remaining_slots} 个表情包") + for filename in files_to_process: + # 如果已经达到上限,停止注册 + if self.emoji_num >= self.emoji_num_max: + logger.warning(f"[警告] 表情包数量已达到上限({self.emoji_num}/{self.emoji_num_max}),停止注册") + break + image_path = os.path.join(emoji_dir, filename) # 获取图片的base64编码和哈希值 @@ -340,6 +355,10 @@ class EmojiManager: logger.success(f"[注册] 新表情包: {filename}") logger.info(f"[描述] {description}") + # 更新当前表情包数量 + self.emoji_num += 1 + logger.info(f"[统计] 当前表情包数量: {self.emoji_num}/{self.emoji_num_max}") + # 保存到images数据库 image_doc = { "hash": image_hash, From 88a4db8894cde45230a3da6878ccd52f2ce54d76 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 14:22:34 +0800 Subject: [PATCH 41/51] =?UTF-8?q?fix=EF=BC=9A=E4=BC=98=E5=8C=96=E4=BA=BA?= =?UTF-8?q?=E6=A0=BCprompt=E6=8F=90=E5=8F=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/individuality/identity.py | 22 +++ src/individuality/individuality.py | 30 +++- ...renqingziji_with_mymy.py => per_bf_gen.py} | 6 +- src/individuality/personality.py | 28 +++- src/individuality/questionnaire.py | 142 ++++++++++++++++++ .../think_flow_prompt_builder.py | 21 +-- src/plugins/moods/moods.py | 22 ++- 7 files changed, 245 insertions(+), 26 deletions(-) rename src/individuality/{renqingziji_with_mymy.py => per_bf_gen.py} (96%) create mode 100644 src/individuality/questionnaire.py diff --git a/src/individuality/identity.py b/src/individuality/identity.py index 287f9e5d5..6704562ec 100644 --- a/src/individuality/identity.py +++ b/src/individuality/identity.py @@ -1,5 +1,6 @@ from dataclasses import dataclass from typing import List +import random @dataclass class Identity: @@ -75,6 +76,27 @@ class Identity: instance.appearance = appearance return instance + def get_prompt(self,x_person,level): + """ + 获取身份特征的prompt + """ + if x_person == 2: + prompt_identity = "你" + elif x_person == 1: + prompt_identity = "我" + else: + prompt_identity = "他" + + if level == 1: + identity_detail = self.identity_detail + random.shuffle(identity_detail) + prompt_identity += identity_detail[0] + elif level == 2: + for detail in identity_detail: + prompt_identity += f",{detail}" + prompt_identity += "。" + return prompt_identity + def to_dict(self) -> dict: """将身份特征转换为字典格式""" return { diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py index 899de62e7..b491ed308 100644 --- a/src/individuality/individuality.py +++ b/src/individuality/individuality.py @@ -74,4 +74,32 @@ class Individuality: instance.personality = Personality.from_dict(data["personality"]) if data.get("identity"): 
instance.identity = Identity.from_dict(data["identity"]) - return instance \ No newline at end of file + return instance + + def get_prompt(self,type,x_person,level): + """ + 获取个体特征的prompt + """ + if type == "personality": + return self.personality.get_prompt(x_person,level) + elif type == "identity": + return self.identity.get_prompt(x_person,level) + else: + return "" + + def get_traits(self,factor): + """ + 获取个体特征的特质 + """ + if factor == "openness": + return self.personality.openness + elif factor == "conscientiousness": + return self.personality.conscientiousness + elif factor == "extraversion": + return self.personality.extraversion + elif factor == "agreeableness": + return self.personality.agreeableness + elif factor == "neuroticism": + return self.personality.neuroticism + + diff --git a/src/individuality/renqingziji_with_mymy.py b/src/individuality/per_bf_gen.py similarity index 96% rename from src/individuality/renqingziji_with_mymy.py rename to src/individuality/per_bf_gen.py index 04cbec099..596229280 100644 --- a/src/individuality/renqingziji_with_mymy.py +++ b/src/individuality/per_bf_gen.py @@ -25,9 +25,9 @@ env_path = project_root / ".env" root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) sys.path.append(root_path) -from src.plugins.personality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402 -from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402 -from src.plugins.personality.offline_llm import LLMModel # noqa: E402 +from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402 +from src.individuality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402 +from src.individuality.offline_llm import LLMModel # noqa: E402 # 加载环境变量 if env_path.exists(): diff --git a/src/individuality/personality.py b/src/individuality/personality.py index 19aa3c212..36dacd2c4 100644 --- a/src/individuality/personality.py +++ b/src/individuality/personality.py @@ -3,6 +3,7 @@ from typing import Dict, List import os import json from pathlib import Path +import random @dataclass class Personality: @@ -116,4 +117,29 @@ class Personality: instance = cls.get_instance() for key, value in data.items(): setattr(instance, key, value) - return instance \ No newline at end of file + return instance + + def get_prompt(self,x_person,level): + # 开始构建prompt + if x_person == 2: + prompt_personality = "你" + elif x_person == 1: + prompt_personality = "我" + else: + prompt_personality = "他" + #person + + prompt_personality += self.personality_core + + if level == 2: + personality_sides = self.personality_sides + random.shuffle(personality_sides) + prompt_personality += f",{personality_sides[0]}" + elif level == 3: + personality_sides = self.personality_sides + for side in personality_sides: + prompt_personality += f",{side}" + + prompt_personality += "。" + + return prompt_personality diff --git a/src/individuality/questionnaire.py b/src/individuality/questionnaire.py new file mode 100644 index 000000000..8e965061d --- /dev/null +++ b/src/individuality/questionnaire.py @@ -0,0 +1,142 @@ +# 人格测试问卷题目 +# 王孟成, 戴晓阳, & 姚树桥. (2011). +# 中国大五人格问卷的初步编制Ⅲ:简式版的制定及信效度检验. 中国临床心理学杂志, 19(04), Article 04. + +# 王孟成, 戴晓阳, & 姚树桥. (2010). +# 中国大五人格问卷的初步编制Ⅰ:理论框架与信度分析. 中国临床心理学杂志, 18(05), Article 05. 
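
The items in the PERSONALITY_QUESTIONS list that follows each carry a factor label and a reverse_scoring flag, but this patch series does not add a scorer for them. Below is a minimal sketch of how such answers could be aggregated, assuming the 1-6 Likert scale the evaluator uses elsewhere in these patches (reverse-scored items flipped as 7 - raw); the helper name score_questionnaire, the skip-unanswered behaviour and the 0-1 normalisation are illustrative assumptions, not code from the repository.

# Illustrative sketch only: aggregate 1-6 Likert answers into per-factor scores.
# Assumes `answers` maps question "id" -> raw score in 1..6; reverse-scored
# items are flipped as 7 - raw. The final division by 6 mirrors the
# "score / 6" convention used by per_bf_gen.py, but this helper itself is
# not part of the repository.
from collections import defaultdict
from typing import Dict, List


def score_questionnaire(answers: Dict[int, int], questions: List[dict]) -> Dict[str, float]:
    totals = defaultdict(float)
    counts = defaultdict(int)
    for q in questions:
        raw = answers.get(q["id"])
        if raw is None:
            continue  # unanswered items are simply skipped in this sketch
        value = 7 - raw if q["reverse_scoring"] else raw
        totals[q["factor"]] += value
        counts[q["factor"]] += 1
    # mean per factor on the 1-6 scale, then normalised to 0-1
    return {factor: round(totals[factor] / counts[factor] / 6, 2) for factor in totals}

For example, score_questionnaire({1: 5, 4: 2}, PERSONALITY_QUESTIONS) would combine item 1 (neuroticism, scored as-is) with item 4 (neuroticism, reverse-scored) into a single 神经质 value on the 0-1 range.
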
+ +PERSONALITY_QUESTIONS = [ + # 神经质维度 (F1) + {"id": 1, "content": "我常担心有什么不好的事情要发生", "factor": "神经质", "reverse_scoring": False}, + {"id": 2, "content": "我常感到害怕", "factor": "神经质", "reverse_scoring": False}, + {"id": 3, "content": "有时我觉得自己一无是处", "factor": "神经质", "reverse_scoring": False}, + {"id": 4, "content": "我很少感到忧郁或沮丧", "factor": "神经质", "reverse_scoring": True}, + {"id": 5, "content": "别人一句漫不经心的话,我常会联系在自己身上", "factor": "神经质", "reverse_scoring": False}, + {"id": 6, "content": "在面对压力时,我有种快要崩溃的感觉", "factor": "神经质", "reverse_scoring": False}, + {"id": 7, "content": "我常担忧一些无关紧要的事情", "factor": "神经质", "reverse_scoring": False}, + {"id": 8, "content": "我常常感到内心不踏实", "factor": "神经质", "reverse_scoring": False}, + # 严谨性维度 (F2) + {"id": 9, "content": "在工作上,我常只求能应付过去便可", "factor": "严谨性", "reverse_scoring": True}, + {"id": 10, "content": "一旦确定了目标,我会坚持努力地实现它", "factor": "严谨性", "reverse_scoring": False}, + {"id": 11, "content": "我常常是仔细考虑之后才做出决定", "factor": "严谨性", "reverse_scoring": False}, + {"id": 12, "content": "别人认为我是个慎重的人", "factor": "严谨性", "reverse_scoring": False}, + {"id": 13, "content": "做事讲究逻辑和条理是我的一个特点", "factor": "严谨性", "reverse_scoring": False}, + {"id": 14, "content": "我喜欢一开头就把事情计划好", "factor": "严谨性", "reverse_scoring": False}, + {"id": 15, "content": "我工作或学习很勤奋", "factor": "严谨性", "reverse_scoring": False}, + {"id": 16, "content": "我是个倾尽全力做事的人", "factor": "严谨性", "reverse_scoring": False}, + # 宜人性维度 (F3) + { + "id": 17, + "content": "尽管人类社会存在着一些阴暗的东西(如战争、罪恶、欺诈),我仍然相信人性总的来说是善良的", + "factor": "宜人性", + "reverse_scoring": False, + }, + {"id": 18, "content": "我觉得大部分人基本上是心怀善意的", "factor": "宜人性", "reverse_scoring": False}, + {"id": 19, "content": "虽然社会上有骗子,但我觉得大部分人还是可信的", "factor": "宜人性", "reverse_scoring": False}, + {"id": 20, "content": "我不太关心别人是否受到不公正的待遇", "factor": "宜人性", "reverse_scoring": True}, + {"id": 21, "content": "我时常觉得别人的痛苦与我无关", "factor": "宜人性", "reverse_scoring": True}, + {"id": 22, "content": "我常为那些遭遇不幸的人感到难过", "factor": "宜人性", "reverse_scoring": False}, + {"id": 23, "content": "我是那种只照顾好自己,不替别人担忧的人", "factor": "宜人性", "reverse_scoring": True}, + {"id": 24, "content": "当别人向我诉说不幸时,我常感到难过", "factor": "宜人性", "reverse_scoring": False}, + # 开放性维度 (F4) + {"id": 25, "content": "我的想象力相当丰富", "factor": "开放性", "reverse_scoring": False}, + {"id": 26, "content": "我头脑中经常充满生动的画面", "factor": "开放性", "reverse_scoring": False}, + {"id": 27, "content": "我对许多事情有着很强的好奇心", "factor": "开放性", "reverse_scoring": False}, + {"id": 28, "content": "我喜欢冒险", "factor": "开放性", "reverse_scoring": False}, + {"id": 29, "content": "我是个勇于冒险,突破常规的人", "factor": "开放性", "reverse_scoring": False}, + {"id": 30, "content": "我身上具有别人没有的冒险精神", "factor": "开放性", "reverse_scoring": False}, + { + "id": 31, + "content": "我渴望学习一些新东西,即使它们与我的日常生活无关", + "factor": "开放性", + "reverse_scoring": False, + }, + { + "id": 32, + "content": "我很愿意也很容易接受那些新事物、新观点、新想法", + "factor": "开放性", + "reverse_scoring": False, + }, + # 外向性维度 (F5) + {"id": 33, "content": "我喜欢参加社交与娱乐聚会", "factor": "外向性", "reverse_scoring": False}, + {"id": 34, "content": "我对人多的聚会感到乏味", "factor": "外向性", "reverse_scoring": True}, + {"id": 35, "content": "我尽量避免参加人多的聚会和嘈杂的环境", "factor": "外向性", "reverse_scoring": True}, + {"id": 36, "content": "在热闹的聚会上,我常常表现主动并尽情玩耍", "factor": "外向性", "reverse_scoring": False}, + {"id": 37, "content": "有我在的场合一般不会冷场", "factor": "外向性", "reverse_scoring": False}, + {"id": 38, "content": "我希望成为领导者而不是被领导者", "factor": "外向性", "reverse_scoring": False}, + {"id": 39, "content": "在一个团体中,我希望处于领导地位", "factor": "外向性", "reverse_scoring": False}, + {"id": 40, "content": 
"别人多认为我是一个热情和友好的人", "factor": "外向性", "reverse_scoring": False}, +] + +# 因子维度说明 +FACTOR_DESCRIPTIONS = { + "外向性": { + "description": "反映个体神经系统的强弱和动力特征。外向性主要表现为个体在人际交往和社交活动中的倾向性," + "包括对社交活动的兴趣、" + "对人群的态度、社交互动中的主动程度以及在群体中的影响力。高分者倾向于积极参与社交活动,乐于与人交往,善于表达自我," + "并往往在群体中发挥领导作用;低分者则倾向于独处,不喜欢热闹的社交场合,表现出内向、安静的特征。", + "trait_words": ["热情", "活力", "社交", "主动"], + "subfactors": { + "合群性": "个体愿意与他人聚在一起,即接近人群的倾向;高分表现乐群、好交际,低分表现封闭、独处", + "热情": "个体对待别人时所表现出的态度;高分表现热情好客,低分表现冷淡", + "支配性": "个体喜欢指使、操纵他人,倾向于领导别人的特点;高分表现好强、发号施令,低分表现顺从、低调", + "活跃": "个体精力充沛,活跃、主动性等特点;高分表现活跃,低分表现安静", + }, + }, + "神经质": { + "description": "反映个体情绪的状态和体验内心苦恼的倾向性。这个维度主要关注个体在面对压力、" + "挫折和日常生活挑战时的情绪稳定性和适应能力。它包含了对焦虑、抑郁、愤怒等负面情绪的敏感程度," + "以及个体对这些情绪的调节和控制能力。高分者容易体验负面情绪,对压力较为敏感,情绪波动较大;" + "低分者则表现出较强的情绪稳定性,能够较好地应对压力和挫折。", + "trait_words": ["稳定", "沉着", "从容", "坚韧"], + "subfactors": { + "焦虑": "个体体验焦虑感的个体差异;高分表现坐立不安,低分表现平静", + "抑郁": "个体体验抑郁情感的个体差异;高分表现郁郁寡欢,低分表现平静", + "敏感多疑": "个体常常关注自己的内心活动,行为和过于意识人对自己的看法、评价;高分表现敏感多疑," + "低分表现淡定、自信", + "脆弱性": "个体在危机或困难面前无力、脆弱的特点;高分表现无能、易受伤、逃避,低分表现坚强", + "愤怒-敌意": "个体准备体验愤怒,及相关情绪的状态;高分表现暴躁易怒,低分表现平静", + }, + }, + "严谨性": { + "description": "反映个体在目标导向行为上的组织、坚持和动机特征。这个维度体现了个体在工作、" + "学习等目标性活动中的自我约束和行为管理能力。它涉及到个体的责任感、自律性、计划性、条理性以及完成任务的态度。" + "高分者往往表现出强烈的责任心、良好的组织能力、谨慎的决策风格和持续的努力精神;低分者则可能表现出随意性强、" + "缺乏规划、做事马虎或易放弃的特点。", + "trait_words": ["负责", "自律", "条理", "勤奋"], + "subfactors": { + "责任心": "个体对待任务和他人认真负责,以及对自己承诺的信守;高分表现有责任心、负责任," + "低分表现推卸责任、逃避处罚", + "自我控制": "个体约束自己的能力,及自始至终的坚持性;高分表现自制、有毅力,低分表现冲动、无毅力", + "审慎性": "个体在采取具体行动前的心理状态;高分表现谨慎、小心,低分表现鲁莽、草率", + "条理性": "个体处理事务和工作的秩序,条理和逻辑性;高分表现整洁、有秩序,低分表现混乱、遗漏", + "勤奋": "个体工作和学习的努力程度及为达到目标而表现出的进取精神;高分表现勤奋、刻苦,低分表现懒散", + }, + }, + "开放性": { + "description": "反映个体对新异事物、新观念和新经验的接受程度,以及在思维和行为方面的创新倾向。" + "这个维度体现了个体在认知和体验方面的广度、深度和灵活性。它包括对艺术的欣赏能力、对知识的求知欲、想象力的丰富程度," + "以及对冒险和创新的态度。高分者往往具有丰富的想象力、广泛的兴趣、开放的思维方式和创新的倾向;低分者则倾向于保守、" + "传统,喜欢熟悉和常规的事物。", + "trait_words": ["创新", "好奇", "艺术", "冒险"], + "subfactors": { + "幻想": "个体富于幻想和想象的水平;高分表现想象力丰富,低分表现想象力匮乏", + "审美": "个体对于艺术和美的敏感与热爱程度;高分表现富有艺术气息,低分表现一般对艺术不敏感", + "好奇心": "个体对未知事物的态度;高分表现兴趣广泛、好奇心浓,低分表现兴趣少、无好奇心", + "冒险精神": "个体愿意尝试有风险活动的个体差异;高分表现好冒险,低分表现保守", + "价值观念": "个体对新事物、新观念、怪异想法的态度;高分表现开放、坦然接受新事物,低分则相反", + }, + }, + "宜人性": { + "description": "反映个体在人际关系中的亲和倾向,体现了对他人的关心、同情和合作意愿。" + "这个维度主要关注个体与他人互动时的态度和行为特征,包括对他人的信任程度、同理心水平、" + "助人意愿以及在人际冲突中的处理方式。高分者通常表现出友善、富有同情心、乐于助人的特质,善于与他人建立和谐关系;" + "低分者则可能表现出较少的人际关注,在社交互动中更注重自身利益,较少考虑他人感受。", + "trait_words": ["友善", "同理", "信任", "合作"], + "subfactors": { + "信任": "个体对他人和/或他人言论的相信程度;高分表现信任他人,低分表现怀疑", + "体贴": "个体对别人的兴趣和需要的关注程度;高分表现体贴、温存,低分表现冷漠、不在乎", + "同情": "个体对处于不利地位的人或物的态度;高分表现富有同情心,低分表现冷漠", + }, + }, +} diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py index b5b01bb7b..de861583e 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py @@ -26,23 +26,10 @@ class PromptBuilder: ) -> tuple[str, str]: current_mind_info = heartflow.get_subheartflow(stream_id).current_mind - - # 开始构建prompt - prompt_personality = "你" - #person + individuality = Individuality.get_instance() - - personality_core = individuality.personality.personality_core - prompt_personality += personality_core - - personality_sides = individuality.personality.personality_sides - random.shuffle(personality_sides) - prompt_personality += f",{personality_sides[0]}" - - identity_detail = individuality.identity.identity_detail - random.shuffle(identity_detail) - 
prompt_personality += f",{identity_detail[0]}" - + prompt_personality = individuality.get_prompt(type = "personality",x_person = 2,level = 1) + prompt_identity = individuality.get_prompt(type = "identity",x_person = 2,level = 1) # 关系 who_chat_in_group = [(chat_stream.user_info.platform, chat_stream.user_info.user_id, @@ -124,7 +111,7 @@ class PromptBuilder: 你刚刚脑子里在想: {current_mind_info} 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n -你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality}。 +你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality} {prompt_identity}。 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些, 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger} 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 diff --git a/src/plugins/moods/moods.py b/src/plugins/moods/moods.py index 3d3feadf1..df01a9c6f 100644 --- a/src/plugins/moods/moods.py +++ b/src/plugins/moods/moods.py @@ -6,6 +6,7 @@ from dataclasses import dataclass from ..config.config import global_config from src.common.logger import get_module_logger, LogConfig, MOOD_STYLE_CONFIG from ..person_info.relationship_manager import relationship_manager +from src.individuality.individuality import Individuality mood_config = LogConfig( # 使用海马体专用样式 @@ -129,16 +130,29 @@ class MoodManager: current_time = time.time() time_diff = current_time - self.last_update - # Valence 向中性(0)回归 - valence_target = 0 + # 获取人格特质 + personality = Individuality.get_instance().personality + if personality: + # 神经质:0.5为默认值,0为极低,1为极高 + # 神经质越高,情绪变化越快(衰减率越高) + neuroticism_factor = 0.5 + (personality.neuroticism - 0.5) * 0.5 # 范围在0.25-0.75之间 + # 宜人性:0.5为默认值,0为极低,1为极高 + # 宜人性越低,越容易走向负面情绪(向负值偏移) + agreeableness_bias = (0.5 - personality.agreeableness) * 0.2 # 范围在-0.1到0.1之间 + else: + neuroticism_factor = 0.5 # 默认值 + agreeableness_bias = 0.0 + + # Valence 向中性(0)回归,考虑宜人性偏差 + valence_target = agreeableness_bias self.current_mood.valence = valence_target + (self.current_mood.valence - valence_target) * math.exp( - -self.decay_rate_valence * time_diff + -self.decay_rate_valence * time_diff * neuroticism_factor ) # Arousal 向中性(0.5)回归 arousal_target = 0.5 self.current_mood.arousal = arousal_target + (self.current_mood.arousal - arousal_target) * math.exp( - -self.decay_rate_arousal * time_diff + -self.decay_rate_arousal * time_diff * neuroticism_factor ) # 确保值在合理范围内 From 2d40b01ba8f27b8eac21cf4c84d774d69400e526 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 15:31:23 +0800 Subject: [PATCH 42/51] =?UTF-8?q?feat=EF=BC=9A=E5=8A=A8=E6=80=81=E9=BA=A6?= =?UTF-8?q?=E9=BA=A6=E4=BA=BA=E6=A0=BC=E6=B5=8B=E8=AF=84=E5=B7=A5=E5=85=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/individuality/offline_llm.py | 2 +- src/individuality/per_bf_gen.py | 266 +++++++++++++----- ...(测试临时版).bat => (临时版)麦麦开始学习.bat | 0 (测试版)麦麦生成人格.bat | 56 ++++ 4 files changed, 246 insertions(+), 78 deletions(-) rename 麦麦开始学习(测试临时版).bat => (临时版)麦麦开始学习.bat (100%) create mode 100644 (测试版)麦麦生成人格.bat diff --git a/src/individuality/offline_llm.py b/src/individuality/offline_llm.py index 8d6820651..711743778 100644 --- a/src/individuality/offline_llm.py +++ b/src/individuality/offline_llm.py @@ -20,7 +20,7 @@ class LLM_request_off: if not self.api_key or not self.base_url: raise ValueError("环境变量未正确加载:SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置") - logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 
base_url + # logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]: """根据输入的提示生成模型的响应""" diff --git a/src/individuality/per_bf_gen.py b/src/individuality/per_bf_gen.py index 596229280..387e5729f 100644 --- a/src/individuality/per_bf_gen.py +++ b/src/individuality/per_bf_gen.py @@ -1,36 +1,30 @@ -""" -The definition of artificial personality in this paper follows the dispositional para-digm and adapts a definition of -personality developed for humans [17]: -Personality for a human is the "whole and organisation of relatively stable tendencies and patterns of experience and -behaviour within one person (distinguishing it from other persons)". This definition is modified for artificial -personality: -Artificial personality describes the relatively stable tendencies and patterns of behav-iour of an AI-based machine that -can be designed by developers and designers via different modalities, such as language, creating the impression -of individuality of a humanized social agent when users interact with the machine.""" - from typing import Dict, List import json import os from pathlib import Path from dotenv import load_dotenv import sys +import toml +import random +from tqdm import tqdm -""" -第一种方案:基于情景评估的人格测定 -""" -current_dir = Path(__file__).resolve().parent -project_root = current_dir.parent.parent.parent -env_path = project_root / ".env" - -root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) +# 添加项目根目录到 Python 路径 +root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(root_path) -from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402 -from src.individuality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402 -from src.individuality.offline_llm import LLMModel # noqa: E402 +# 加载配置文件 +config_path = os.path.join(root_path, "config", "bot_config.toml") +with open(config_path, "r", encoding="utf-8") as f: + config = toml.load(f) + +# 现在可以导入src模块 +from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES +from src.individuality.questionnaire import FACTOR_DESCRIPTIONS +from src.individuality.offline_llm import LLM_request_off # 加载环境变量 -if env_path.exists(): +env_path = os.path.join(root_path, ".env") +if os.path.exists(env_path): print(f"从 {env_path} 加载环境变量") load_dotenv(env_path) else: @@ -38,10 +32,60 @@ else: print("将使用默认配置") +def adapt_scene(scene: str) -> str: + + personality_core = config['personality']['personality_core'] + personality_sides = config['personality']['personality_sides'] + personality_side = random.choice(personality_sides) + identity_details = config['identity']['identity_detail'] + identity_detail = random.choice(identity_details) + + """ + 根据config中的属性,改编场景使其更适合当前角色 + + Args: + scene: 原始场景描述 + + Returns: + str: 改编后的场景描述 + """ + try: + prompt = f""" +这是一个参与人格测评的角色形象: +- 昵称: {config['bot']['nickname']} +- 性别: {config['identity']['gender']} +- 年龄: {config['identity']['age']}岁 +- 外貌: {config['identity']['appearance']} +- 性格核心: {personality_core} +- 性格侧面: {personality_side} +- 身份细节: {identity_detail} + +请根据上述形象,改编以下原始场景,在测评中,用户将根据该场景给出上述角色形象的反应: +{scene} + +保持场景的本质不变,但最好贴近生活且具体,并且让它更适合这个角色。 +改编后的场景应该自然、连贯,并考虑角色的年龄、身份和性格特点。只返回改编后的场景描述,不要包含其他说明。注意{config['bot']['nickname']}是面对这个情景的人,而不是场景的其他人。""" + + llm = LLM_request_off(model_name=config['model']['llm_normal']['name']) + adapted_scene, _ = llm.generate_response(prompt) + + # 检查返回的场景是否为空或错误信息 + if not adapted_scene or "错误" in 
adapted_scene or "失败" in adapted_scene: + print("场景改编失败,将使用原始场景") + return scene + + return adapted_scene + except Exception as e: + print(f"场景改编过程出错:{str(e)},将使用原始场景") + return scene + + class PersonalityEvaluator_direct: def __init__(self): self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0} self.scenarios = [] + self.final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0} + self.dimension_counts = {trait: 0 for trait in self.final_scores.keys()} # 为每个人格特质获取对应的场景 for trait in PERSONALITY_SCENES: @@ -67,7 +111,7 @@ class PersonalityEvaluator_direct: {"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key} ) - self.llm = LLMModel() + self.llm = LLM_request_off() def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]: """ @@ -125,71 +169,139 @@ class PersonalityEvaluator_direct: except Exception as e: print(f"评估过程出错:{str(e)}") return {dim: 3.5 for dim in dimensions} + + def run_evaluation(self): + """ + 运行整个评估过程 + """ + print(f"欢迎使用{config['bot']['nickname']}形象创建程序!") + print("接下来,将给您呈现一系列有关您bot的场景(共15个)。") + print("请想象您的bot在以下场景下会做什么,并描述您的bot的反应。") + print("每个场景都会进行不同方面的评估。") + print("\n角色基本信息:") + print(f"- 昵称:{config['bot']['nickname']}") + print(f"- 性格核心:{config['personality']['personality_core']}") + print(f"- 性格侧面:{config['personality']['personality_sides']}") + print(f"- 身份细节:{config['identity']['identity_detail']}") + print("\n准备好了吗?按回车键开始...") + input() + + total_scenarios = len(self.scenarios) + progress_bar = tqdm(total=total_scenarios, desc="场景进度", ncols=100, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]') + + for i, scenario_data in enumerate(self.scenarios, 1): + # print(f"\n{'-' * 20} 场景 {i}/{total_scenarios} - {scenario_data['场景编号']} {'-' * 20}") + + # 改编场景,使其更适合当前角色 + print(f"{config['bot']['nickname']}祈祷中...") + adapted_scene = adapt_scene(scenario_data["场景"]) + scenario_data["改编场景"] = adapted_scene + + print(adapted_scene) + print(f"\n请描述{config['bot']['nickname']}在这种情况下会如何反应:") + response = input().strip() + + if not response: + print("反应描述不能为空!") + continue + + print("\n正在评估您的描述...") + scores = self.evaluate_response(adapted_scene, response, scenario_data["评估维度"]) + + # 更新最终分数 + for dimension, score in scores.items(): + self.final_scores[dimension] += score + self.dimension_counts[dimension] += 1 + + print("\n当前评估结果:") + print("-" * 30) + for dimension, score in scores.items(): + print(f"{dimension}: {score}/6") + + # 更新进度条 + progress_bar.update(1) + + # if i < total_scenarios: + # print("\n按回车键继续下一个场景...") + # input() + + progress_bar.close() + + # 计算平均分 + for dimension in self.final_scores: + if self.dimension_counts[dimension] > 0: + self.final_scores[dimension] = round(self.final_scores[dimension] / self.dimension_counts[dimension], 2) + + print("\n" + "=" * 50) + print(f" {config['bot']['nickname']}的人格特征评估结果 ".center(50)) + print("=" * 50) + for trait, score in self.final_scores.items(): + print(f"{trait}: {score}/6".ljust(20) + f"测试场景数:{self.dimension_counts[trait]}".rjust(30)) + print("=" * 50) + + # 返回评估结果 + return self.get_result() + + def get_result(self): + """ + 获取评估结果 + """ + return { + "final_scores": self.final_scores, + "dimension_counts": self.dimension_counts, + "scenarios": self.scenarios, + "bot_info": { + "nickname": config['bot']['nickname'], + "gender": config['identity']['gender'], + "age": config['identity']['age'], + "height": config['identity']['height'], + "weight": config['identity']['weight'], + "appearance": 
config['identity']['appearance'], + "personality_core": config['personality']['personality_core'], + "personality_sides": config['personality']['personality_sides'], + "identity_detail": config['identity']['identity_detail'] + } + } def main(): - print("欢迎使用人格形象创建程序!") - print("接下来,您将面对一系列场景(共15个)。请根据您想要创建的角色形象,描述在该场景下可能的反应。") - print("每个场景都会评估不同的人格维度,最终得出完整的人格特征评估。") - print("评分标准:1=非常不符合,2=比较不符合,3=有点不符合,4=有点符合,5=比较符合,6=非常符合") - print("\n准备好了吗?按回车键开始...") - input() - evaluator = PersonalityEvaluator_direct() - final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0} - dimension_counts = {trait: 0 for trait in final_scores.keys()} + result = evaluator.run_evaluation() - for i, scenario_data in enumerate(evaluator.scenarios, 1): - print(f"\n场景 {i}/{len(evaluator.scenarios)} - {scenario_data['场景编号']}:") - print("-" * 50) - print(scenario_data["场景"]) - print("\n请描述您的角色在这种情况下会如何反应:") - response = input().strip() - - if not response: - print("反应描述不能为空!") - continue - - print("\n正在评估您的描述...") - scores = evaluator.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"]) - - # 更新最终分数 - for dimension, score in scores.items(): - final_scores[dimension] += score - dimension_counts[dimension] += 1 - - print("\n当前评估结果:") - print("-" * 30) - for dimension, score in scores.items(): - print(f"{dimension}: {score}/6") - - if i < len(evaluator.scenarios): - print("\n按回车键继续下一个场景...") - input() - - # 计算平均分 - for dimension in final_scores: - if dimension_counts[dimension] > 0: - final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2) - - print("\n最终人格特征评估结果:") - print("-" * 30) - for trait, score in final_scores.items(): - print(f"{trait}: {score}/6") - print(f"测试场景数:{dimension_counts[trait]}") - - # 保存结果 - result = {"final_scores": final_scores, "dimension_counts": dimension_counts, "scenarios": evaluator.scenarios} + # 准备简化的结果数据 + simplified_result = { + "openness": round(result["final_scores"]["开放性"] / 6, 1), # 转换为0-1范围 + "conscientiousness": round(result["final_scores"]["严谨性"] / 6, 1), + "extraversion": round(result["final_scores"]["外向性"] / 6, 1), + "agreeableness": round(result["final_scores"]["宜人性"] / 6, 1), + "neuroticism": round(result["final_scores"]["神经质"] / 6, 1), + "bot_nickname": config['bot']['nickname'] + } # 确保目录存在 - os.makedirs("results", exist_ok=True) + save_dir = os.path.join(root_path, "data", "personality") + os.makedirs(save_dir, exist_ok=True) + + # 创建文件名,替换可能的非法字符 + bot_name = config['bot']['nickname'] + # 替换Windows文件名中不允许的字符 + for char in ['\\', '/', ':', '*', '?', '"', '<', '>', '|']: + bot_name = bot_name.replace(char, '_') + + file_name = f"{bot_name}_personality.per" + save_path = os.path.join(save_dir, file_name) + + # 保存简化的结果 + with open(save_path, "w", encoding="utf-8") as f: + json.dump(simplified_result, f, ensure_ascii=False, indent=4) - # 保存到文件 + print(f"\n结果已保存到 {save_path}") + + # 同时保存完整结果到results目录 + os.makedirs("results", exist_ok=True) with open("results/personality_result.json", "w", encoding="utf-8") as f: json.dump(result, f, ensure_ascii=False, indent=2) - print("\n结果已保存到 results/personality_result.json") - if __name__ == "__main__": main() diff --git a/麦麦开始学习(测试临时版).bat b/(临时版)麦麦开始学习.bat similarity index 100% rename from 麦麦开始学习(测试临时版).bat rename to (临时版)麦麦开始学习.bat diff --git a/(测试版)麦麦生成人格.bat b/(测试版)麦麦生成人格.bat new file mode 100644 index 000000000..e2aa5c06a --- /dev/null +++ b/(测试版)麦麦生成人格.bat @@ -0,0 +1,56 @@ +@echo off +chcp 65001 > nul +setlocal enabledelayedexpansion +cd /d %~dp0 + +title 麦麦人格生成 + +cls 
+echo ====================================== +echo 警告提示 +echo ====================================== +echo 1.这是一个demo系统,仅供体验,特性可能会在将来移除 +echo ====================================== + +echo. +echo ====================================== +echo 请选择Python环境: +echo 1 - venv (推荐) +echo 2 - conda +echo ====================================== +choice /c 12 /n /m "请输入数字选择(1或2): " + +if errorlevel 2 ( + echo ====================================== + set "CONDA_ENV=" + set /p CONDA_ENV="请输入要激活的 conda 环境名称: " + + :: 检查输入是否为空 + if "!CONDA_ENV!"=="" ( + echo 错误:环境名称不能为空 + pause + exit /b 1 + ) + + call conda activate !CONDA_ENV! + if errorlevel 1 ( + echo 激活 conda 环境失败 + pause + exit /b 1 + ) + + echo Conda 环境 "!CONDA_ENV!" 激活成功 + python src/individuality/per_bf_gen.py +) else ( + if exist "venv\Scripts\python.exe" ( + venv\Scripts\python src/individuality/per_bf_gen.py + ) else ( + echo ====================================== + echo 错误: venv环境不存在,请先创建虚拟环境 + pause + exit /b 1 + ) +) + +endlocal +pause From 028054aa7db591931f6de210d506a1446d491f5a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 15:51:13 +0800 Subject: [PATCH 43/51] =?UTF-8?q?fix=EF=BC=9A=E8=B0=83=E6=95=B4=E5=8F=82?= =?UTF-8?q?=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/individuality/per_bf_gen.py | 7 ++++--- src/individuality/template_scene.json | 14 +++++++------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/individuality/per_bf_gen.py b/src/individuality/per_bf_gen.py index 387e5729f..fbf19e595 100644 --- a/src/individuality/per_bf_gen.py +++ b/src/individuality/per_bf_gen.py @@ -60,11 +60,12 @@ def adapt_scene(scene: str) -> str: - 性格侧面: {personality_side} - 身份细节: {identity_detail} -请根据上述形象,改编以下原始场景,在测评中,用户将根据该场景给出上述角色形象的反应: +请根据上述形象,改编以下场景,在测评中,用户将根据该场景给出上述角色形象的反应: {scene} - 保持场景的本质不变,但最好贴近生活且具体,并且让它更适合这个角色。 -改编后的场景应该自然、连贯,并考虑角色的年龄、身份和性格特点。只返回改编后的场景描述,不要包含其他说明。注意{config['bot']['nickname']}是面对这个情景的人,而不是场景的其他人。""" +改编后的场景应该自然、连贯,并考虑角色的年龄、身份和性格特点。只返回改编后的场景描述,不要包含其他说明。注意{config['bot']['nickname']}是面对这个情景的人,而不是场景的其他人。 +以上场景不包含{config['bot']['nickname']}反应,这是用户填写的部分,现在,请你给出改编后的场景描述,不包含{config['bot']['nickname']}的反应。 +""" llm = LLM_request_off(model_name=config['model']['llm_normal']['name']) adapted_scene, _ = llm.generate_response(prompt) diff --git a/src/individuality/template_scene.json b/src/individuality/template_scene.json index cd9ae4752..a6542e75d 100644 --- a/src/individuality/template_scene.json +++ b/src/individuality/template_scene.json @@ -57,8 +57,8 @@ "explanation": "通过活动组织场景,观察个体对活动计划的态度。" }, "场景4": { - "scenario": "你和恋人计划一起去旅游,对方说:\n\n恋人:「我们就随心而行吧!订个目的地,其他的到了再说,这样更有意思。」\n\n距离出发还有一周时间,但机票、住宿和具体行程都还没有确定。", - "explanation": "通过旅行规划场景,观察个体的计划性和对不确定性的接受程度。" + "scenario": "你的好友小明邀请你一起参加一个重要的演出活动,他说:\n\n小明:「到时候我们就即兴发挥吧!不用排练了,我相信我们的默契。」\n\n距离演出还有三天,但节目内容、配乐和服装都还没有确定。", + "explanation": "通过演出准备场景,观察个体的计划性和对不确定性的接受程度。" }, "场景5": { "scenario": "在一个重要的团队项目中,你发现一个同事的工作存在明显错误:\n\n同事:「差不多就行了,反正领导也看不出来。」\n\n这个错误可能不会立即造成问题,但长期来看可能会影响项目质量。", @@ -75,12 +75,12 @@ "explanation": "通过新技术应用场景,观察个体对创新学习方式的态度。" }, "场景3": { - "scenario": "在社交媒体上,你看到一个朋友分享了一种新的生活方式:\n\n「最近我在尝试'数字游牧'生活,就是一边远程工作一边环游世界。没有固定住所,住青旅或短租,认识来自世界各地的朋友。虽然有时会很不稳定,但这种自由的生活方式真的很棒!」\n\n评论区里争论不断,有人向往这种生活,也有人觉得太冒险。", - "explanation": "通过另类生活方式,观察个体对非传统选择的态度。" + "scenario": "在社交媒体上,你看到一个朋友分享了一种新的学习方式:\n\n「最近我在尝试'沉浸式学习',就是完全投入到一个全新的领域。比如学习一门陌生的语言,或者尝试完全不同的职业技能。虽然过程会很辛苦,但这种打破舒适圈的感觉真的很棒!」\n\n评论区里争论不断,有人认为这种学习方式效率高,也有人觉得太激进。", + "explanation": "通过新型学习方式,观察个体对创新和挑战的态度。" 
}, "场景4": { - "scenario": "你的恋人突然提出了一个想法:\n\n恋人:「我们要不要尝试一下开放式关系?就是在保持彼此关系的同时,也允许和其他人发展感情。现在国外很多年轻人都这样。」\n\n这个提议让你感到意外,你之前从未考虑过这种可能性。", - "explanation": "通过感情观念场景,观察个体对非传统关系模式的接受度。" + "scenario": "你的朋友向你推荐了一种新的饮食方式:\n\n朋友:「我最近在尝试'未来食品',比如人造肉、3D打印食物、昆虫蛋白等。这不仅对环境友好,营养也很均衡。要不要一起来尝试看看?」\n\n这个提议让你感到好奇又犹豫,你之前从未尝试过这些新型食物。", + "explanation": "通过饮食创新场景,观察个体对新事物的接受度和尝试精神。" }, "场景5": { "scenario": "在一次朋友聚会上,大家正在讨论未来职业规划:\n\n朋友A:「我准备辞职去做自媒体,专门介绍一些小众的文化和艺术。」\n\n朋友B:「我想去学习生物科技,准备转行做人造肉研发。」\n\n朋友C:「我在考虑加入一个区块链创业项目,虽然风险很大。」", @@ -101,7 +101,7 @@ "explanation": "通过网络互助场景,观察个体的共情能力和安慰方式。" }, "场景4": { - "scenario": "你的恋人向你倾诉工作压力:\n\n恋人:「最近工作真的好累,感觉快坚持不下去了...」\n\n但今天你也遇到了很多烦心事,心情也不太好。", + "scenario": "你的朋友向你倾诉工作压力:\n\n朋友:「最近工作真的好累,感觉快坚持不下去了...」\n\n但今天你也遇到了很多烦心事,心情也不太好。", "explanation": "通过感情关系场景,观察个体在自身状态不佳时的关怀能力。" }, "场景5": { From 6393bee0207454f5bac1331f732bbbc47db7b19a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 15:53:50 +0800 Subject: [PATCH 44/51] =?UTF-8?q?fix=EF=BC=9A=E5=86=8D=E6=AC=A1=E8=B0=83?= =?UTF-8?q?=E6=95=B4=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/individuality/offline_llm.py | 2 +- src/individuality/per_bf_gen.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/individuality/offline_llm.py b/src/individuality/offline_llm.py index 711743778..7a698fc1d 100644 --- a/src/individuality/offline_llm.py +++ b/src/individuality/offline_llm.py @@ -30,7 +30,7 @@ class LLM_request_off: data = { "model": self.model_name, "messages": [{"role": "user", "content": prompt}], - "temperature": 0.5, + "temperature": 0.4, **self.params, } diff --git a/src/individuality/per_bf_gen.py b/src/individuality/per_bf_gen.py index fbf19e595..05b6c0234 100644 --- a/src/individuality/per_bf_gen.py +++ b/src/individuality/per_bf_gen.py @@ -63,8 +63,8 @@ def adapt_scene(scene: str) -> str: 请根据上述形象,改编以下场景,在测评中,用户将根据该场景给出上述角色形象的反应: {scene} 保持场景的本质不变,但最好贴近生活且具体,并且让它更适合这个角色。 -改编后的场景应该自然、连贯,并考虑角色的年龄、身份和性格特点。只返回改编后的场景描述,不要包含其他说明。注意{config['bot']['nickname']}是面对这个情景的人,而不是场景的其他人。 -以上场景不包含{config['bot']['nickname']}反应,这是用户填写的部分,现在,请你给出改编后的场景描述,不包含{config['bot']['nickname']}的反应。 +改编后的场景应该自然、连贯,并考虑角色的年龄、身份和性格特点。只返回改编后的场景描述,不要包含其他说明。注意{config['bot']['nickname']}是面对这个场景的人,而不是场景的其他人。场景中不会有其描述, +现在,请你给出改编后的场景描述 """ llm = LLM_request_off(model_name=config['model']['llm_normal']['name']) From 61d35063d215cfac2b95d485c7aa62f9caa61f07 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 15:57:42 +0800 Subject: [PATCH 45/51] fix:ruff --- src/heart_flow/sub_heartflow.py | 6 +++--- src/individuality/per_bf_gen.py | 9 ++++----- src/individuality/personality.py | 1 - src/plugins/PFC/pfc.py | 1 - src/plugins/PFC/pfc_utils.py | 2 +- .../reasoning_chat/reasoning_prompt_builder.py | 5 ++--- .../think_flow_chat/think_flow_prompt_builder.py | 3 --- 7 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py index b6a0fb30e..1b4bebeca 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/heart_flow/sub_heartflow.py @@ -193,7 +193,7 @@ class SubHeartflow: related_info,grouped_results = await self.get_prompt_info(chat_observe_info + message_txt, 0.4) print(related_info) - for topic, results in grouped_results.items(): + for _topic, results in grouped_results.items(): for result in results: print(result) self.running_knowledges.append(result) @@ -457,8 +457,8 @@ class SubHeartflow: # 按主题组织输出 for topic, results in 
grouped_results.items(): related_info += f"【主题: {topic}】\n" - for i, result in enumerate(results, 1): - similarity = result["similarity"] + for _i, result in enumerate(results, 1): + _similarity = result["similarity"] content = result["content"].strip() # 调试:为内容添加序号和相似度信息 # related_info += f"{i}. [{similarity:.2f}] {content}\n" diff --git a/src/individuality/per_bf_gen.py b/src/individuality/per_bf_gen.py index 05b6c0234..0a8b2e4a7 100644 --- a/src/individuality/per_bf_gen.py +++ b/src/individuality/per_bf_gen.py @@ -1,7 +1,6 @@ from typing import Dict, List import json import os -from pathlib import Path from dotenv import load_dotenv import sys import toml @@ -18,9 +17,9 @@ with open(config_path, "r", encoding="utf-8") as f: config = toml.load(f) # 现在可以导入src模块 -from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES -from src.individuality.questionnaire import FACTOR_DESCRIPTIONS -from src.individuality.offline_llm import LLM_request_off +from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES #noqa E402 +from src.individuality.questionnaire import FACTOR_DESCRIPTIONS #noqa E402 +from src.individuality.offline_llm import LLM_request_off #noqa E402 # 加载环境变量 env_path = os.path.join(root_path, ".env") @@ -190,7 +189,7 @@ class PersonalityEvaluator_direct: total_scenarios = len(self.scenarios) progress_bar = tqdm(total=total_scenarios, desc="场景进度", ncols=100, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]') - for i, scenario_data in enumerate(self.scenarios, 1): + for _i, scenario_data in enumerate(self.scenarios, 1): # print(f"\n{'-' * 20} 场景 {i}/{total_scenarios} - {scenario_data['场景编号']} {'-' * 20}") # 改编场景,使其更适合当前角色 diff --git a/src/individuality/personality.py b/src/individuality/personality.py index 36dacd2c4..eb822ab1f 100644 --- a/src/individuality/personality.py +++ b/src/individuality/personality.py @@ -1,6 +1,5 @@ from dataclasses import dataclass from typing import Dict, List -import os import json from pathlib import Path import random diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py index e02409ce8..4500625ac 100644 --- a/src/plugins/PFC/pfc.py +++ b/src/plugins/PFC/pfc.py @@ -18,7 +18,6 @@ from .chat_observer import ChatObserver from .pfc_KnowledgeFetcher import KnowledgeFetcher from .reply_checker import ReplyChecker from .pfc_utils import get_items_from_json -import json import time logger = get_module_logger("pfc") diff --git a/src/plugins/PFC/pfc_utils.py b/src/plugins/PFC/pfc_utils.py index 2b94e6c4d..9d0278b02 100644 --- a/src/plugins/PFC/pfc_utils.py +++ b/src/plugins/PFC/pfc_utils.py @@ -1,6 +1,6 @@ import json import re -from typing import Dict, Any, Optional, List, Tuple, Union +from typing import Dict, Any, Optional, Tuple from src.common.logger import get_module_logger logger = get_module_logger("pfc_utils") diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py index 176f59b43..3a9f0dc46 100644 --- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py +++ b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py @@ -1,7 +1,6 @@ import random import time from typing import Optional, Union -import numpy as np from ....common.database import db from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker @@ -297,8 +296,8 @@ class PromptBuilder: # 按主题组织输出 for topic, results in grouped_results.items(): related_info += f"【主题: 
{topic}】\n" - for i, result in enumerate(results, 1): - similarity = result["similarity"] + for _i, result in enumerate(results, 1): + _similarity = result["similarity"] content = result["content"].strip() # 调试:为内容添加序号和相似度信息 # related_info += f"{i}. [{similarity:.2f}] {content}\n" diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py index de861583e..b6fe9fb89 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py @@ -1,10 +1,7 @@ import random -import time from typing import Optional -from ...memory_system.Hippocampus import HippocampusManager from ...moods.moods import MoodManager -from ...schedule.schedule_generator import bot_schedule from ...config.config import global_config from ...chat.utils import get_recent_group_detailed_plain_text, get_recent_group_speaker from ...chat.chat_stream import chat_manager From ff46d5a7d245e0e80e17569326564ba7774d7010 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Apr 2025 22:15:57 +0800 Subject: [PATCH 46/51] =?UTF-8?q?fix=EF=BC=9Amatplotlib=E7=82=B8=E9=A3=9E?= =?UTF-8?q?=E4=B8=BB=E7=A8=8B=E5=BA=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gui/logger_gui.py | 597 +++++++++++++------------ src/gui/reasoning_gui.py | 544 +++++++++++----------- src/plugins/person_info/person_info.py | 39 +- 3 files changed, 602 insertions(+), 578 deletions(-) diff --git a/src/gui/logger_gui.py b/src/gui/logger_gui.py index f2dd698cd..9488446c4 100644 --- a/src/gui/logger_gui.py +++ b/src/gui/logger_gui.py @@ -1,347 +1,378 @@ -import customtkinter as ctk -import subprocess -import threading -import queue -import re -import os -import signal -from collections import deque +# import customtkinter as ctk +# import subprocess +# import threading +# import queue +# import re +# import os +# import signal +# from collections import deque +# import sys -# 设置应用的外观模式和默认颜色主题 -ctk.set_appearance_mode("dark") -ctk.set_default_color_theme("blue") +# # 设置应用的外观模式和默认颜色主题 +# ctk.set_appearance_mode("dark") +# ctk.set_default_color_theme("blue") -class LogViewerApp(ctk.CTk): - """日志查看器应用的主类,继承自customtkinter的CTk类""" +# class LogViewerApp(ctk.CTk): +# """日志查看器应用的主类,继承自customtkinter的CTk类""" - def __init__(self): - """初始化日志查看器应用的界面和状态""" - super().__init__() - self.title("日志查看器") - self.geometry("1200x800") +# def __init__(self): +# """初始化日志查看器应用的界面和状态""" +# super().__init__() +# self.title("日志查看器") +# self.geometry("1200x800") - # 初始化进程、日志队列、日志数据等变量 - self.process = None - self.log_queue = queue.Queue() - self.log_data = deque(maxlen=10000) # 使用固定长度队列 - self.available_levels = set() - self.available_modules = set() - self.sorted_modules = [] - self.module_checkboxes = {} # 存储模块复选框的字典 +# # 标记GUI是否运行中 +# self.is_running = True + +# # 程序关闭时的清理操作 +# self.protocol("WM_DELETE_WINDOW", self._on_closing) + +# # 初始化进程、日志队列、日志数据等变量 +# self.process = None +# self.log_queue = queue.Queue() +# self.log_data = deque(maxlen=10000) # 使用固定长度队列 +# self.available_levels = set() +# self.available_modules = set() +# self.sorted_modules = [] +# self.module_checkboxes = {} # 存储模块复选框的字典 - # 日志颜色配置 - self.color_config = { - "time": "#888888", - "DEBUG": "#2196F3", - "INFO": "#4CAF50", - "WARNING": "#FF9800", - "ERROR": "#F44336", - "module": "#D4D0AB", - "default": "#FFFFFF", - } +# # 日志颜色配置 +# self.color_config = { +# "time": 
"#888888", +# "DEBUG": "#2196F3", +# "INFO": "#4CAF50", +# "WARNING": "#FF9800", +# "ERROR": "#F44336", +# "module": "#D4D0AB", +# "default": "#FFFFFF", +# } - # 列可见性配置 - self.column_visibility = {"show_time": True, "show_level": True, "show_module": True} +# # 列可见性配置 +# self.column_visibility = {"show_time": True, "show_level": True, "show_module": True} - # 选中的日志等级和模块 - self.selected_levels = set() - self.selected_modules = set() +# # 选中的日志等级和模块 +# self.selected_levels = set() +# self.selected_modules = set() - # 创建界面组件并启动日志队列处理 - self.create_widgets() - self.after(100, self.process_log_queue) +# # 创建界面组件并启动日志队列处理 +# self.create_widgets() +# self.after(100, self.process_log_queue) - def create_widgets(self): - """创建应用界面的各个组件""" - self.grid_columnconfigure(0, weight=1) - self.grid_rowconfigure(1, weight=1) +# def create_widgets(self): +# """创建应用界面的各个组件""" +# self.grid_columnconfigure(0, weight=1) +# self.grid_rowconfigure(1, weight=1) - # 控制面板 - control_frame = ctk.CTkFrame(self) - control_frame.grid(row=0, column=0, sticky="ew", padx=10, pady=5) +# # 控制面板 +# control_frame = ctk.CTkFrame(self) +# control_frame.grid(row=0, column=0, sticky="ew", padx=10, pady=5) - self.start_btn = ctk.CTkButton(control_frame, text="启动", command=self.start_process) - self.start_btn.pack(side="left", padx=5) +# self.start_btn = ctk.CTkButton(control_frame, text="启动", command=self.start_process) +# self.start_btn.pack(side="left", padx=5) - self.stop_btn = ctk.CTkButton(control_frame, text="停止", command=self.stop_process, state="disabled") - self.stop_btn.pack(side="left", padx=5) +# self.stop_btn = ctk.CTkButton(control_frame, text="停止", command=self.stop_process, state="disabled") +# self.stop_btn.pack(side="left", padx=5) - self.clear_btn = ctk.CTkButton(control_frame, text="清屏", command=self.clear_logs) - self.clear_btn.pack(side="left", padx=5) +# self.clear_btn = ctk.CTkButton(control_frame, text="清屏", command=self.clear_logs) +# self.clear_btn.pack(side="left", padx=5) - column_filter_frame = ctk.CTkFrame(control_frame) - column_filter_frame.pack(side="left", padx=20) +# column_filter_frame = ctk.CTkFrame(control_frame) +# column_filter_frame.pack(side="left", padx=20) - self.time_check = ctk.CTkCheckBox(column_filter_frame, text="显示时间", command=self.refresh_logs) - self.time_check.pack(side="left", padx=5) - self.time_check.select() +# self.time_check = ctk.CTkCheckBox(column_filter_frame, text="显示时间", command=self.refresh_logs) +# self.time_check.pack(side="left", padx=5) +# self.time_check.select() - self.level_check = ctk.CTkCheckBox(column_filter_frame, text="显示等级", command=self.refresh_logs) - self.level_check.pack(side="left", padx=5) - self.level_check.select() +# self.level_check = ctk.CTkCheckBox(column_filter_frame, text="显示等级", command=self.refresh_logs) +# self.level_check.pack(side="left", padx=5) +# self.level_check.select() - self.module_check = ctk.CTkCheckBox(column_filter_frame, text="显示模块", command=self.refresh_logs) - self.module_check.pack(side="left", padx=5) - self.module_check.select() +# self.module_check = ctk.CTkCheckBox(column_filter_frame, text="显示模块", command=self.refresh_logs) +# self.module_check.pack(side="left", padx=5) +# self.module_check.select() - # 筛选面板 - filter_frame = ctk.CTkFrame(self) - filter_frame.grid(row=0, column=1, rowspan=2, sticky="ns", padx=5) +# # 筛选面板 +# filter_frame = ctk.CTkFrame(self) +# filter_frame.grid(row=0, column=1, rowspan=2, sticky="ns", padx=5) - ctk.CTkLabel(filter_frame, text="日志等级筛选").pack(pady=5) - self.level_scroll = 
ctk.CTkScrollableFrame(filter_frame, width=150, height=200) - self.level_scroll.pack(fill="both", expand=True, padx=5) +# ctk.CTkLabel(filter_frame, text="日志等级筛选").pack(pady=5) +# self.level_scroll = ctk.CTkScrollableFrame(filter_frame, width=150, height=200) +# self.level_scroll.pack(fill="both", expand=True, padx=5) - ctk.CTkLabel(filter_frame, text="模块筛选").pack(pady=5) - self.module_filter_entry = ctk.CTkEntry(filter_frame, placeholder_text="输入模块过滤词") - self.module_filter_entry.pack(pady=5) - self.module_filter_entry.bind("", self.update_module_filter) +# ctk.CTkLabel(filter_frame, text="模块筛选").pack(pady=5) +# self.module_filter_entry = ctk.CTkEntry(filter_frame, placeholder_text="输入模块过滤词") +# self.module_filter_entry.pack(pady=5) +# self.module_filter_entry.bind("", self.update_module_filter) - self.module_scroll = ctk.CTkScrollableFrame(filter_frame, width=300, height=200) - self.module_scroll.pack(fill="both", expand=True, padx=5) +# self.module_scroll = ctk.CTkScrollableFrame(filter_frame, width=300, height=200) +# self.module_scroll.pack(fill="both", expand=True, padx=5) - self.log_text = ctk.CTkTextbox(self, wrap="word") - self.log_text.grid(row=1, column=0, sticky="nsew", padx=10, pady=5) +# self.log_text = ctk.CTkTextbox(self, wrap="word") +# self.log_text.grid(row=1, column=0, sticky="nsew", padx=10, pady=5) - self.init_text_tags() +# self.init_text_tags() - def update_module_filter(self, event): - """根据模块过滤词更新模块复选框的显示""" - filter_text = self.module_filter_entry.get().strip().lower() - for module, checkbox in self.module_checkboxes.items(): - if filter_text in module.lower(): - checkbox.pack(anchor="w", padx=5, pady=2) - else: - checkbox.pack_forget() +# def update_module_filter(self, event): +# """根据模块过滤词更新模块复选框的显示""" +# filter_text = self.module_filter_entry.get().strip().lower() +# for module, checkbox in self.module_checkboxes.items(): +# if filter_text in module.lower(): +# checkbox.pack(anchor="w", padx=5, pady=2) +# else: +# checkbox.pack_forget() - def update_filters(self, level, module): - """更新日志等级和模块的筛选器""" - if level not in self.available_levels: - self.available_levels.add(level) - self.add_checkbox(self.level_scroll, level, "level") +# def update_filters(self, level, module): +# """更新日志等级和模块的筛选器""" +# if level not in self.available_levels: +# self.available_levels.add(level) +# self.add_checkbox(self.level_scroll, level, "level") - module_key = self.get_module_key(module) - if module_key not in self.available_modules: - self.available_modules.add(module_key) - self.sorted_modules = sorted(self.available_modules, key=lambda x: x.lower()) - self.rebuild_module_checkboxes() +# module_key = self.get_module_key(module) +# if module_key not in self.available_modules: +# self.available_modules.add(module_key) +# self.sorted_modules = sorted(self.available_modules, key=lambda x: x.lower()) +# self.rebuild_module_checkboxes() - def rebuild_module_checkboxes(self): - """重新构建模块复选框""" - # 清空现有复选框 - for widget in self.module_scroll.winfo_children(): - widget.destroy() - self.module_checkboxes.clear() +# def rebuild_module_checkboxes(self): +# """重新构建模块复选框""" +# # 清空现有复选框 +# for widget in self.module_scroll.winfo_children(): +# widget.destroy() +# self.module_checkboxes.clear() - # 重建排序后的复选框 - for module in self.sorted_modules: - self.add_checkbox(self.module_scroll, module, "module") +# # 重建排序后的复选框 +# for module in self.sorted_modules: +# self.add_checkbox(self.module_scroll, module, "module") - def add_checkbox(self, parent, text, type_): - """在指定父组件中添加复选框""" +# def 
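update_module_filter above shows or hides each module checkbox with pack() / pack_forget() depending on what is typed into the filter entry. One detail worth flagging: the event name in bind("", ...) is blank in this copy of the patch (the angle-bracketed event string appears to have been stripped), so the sketch below assumes <KeyRelease>; the module names are illustrative only.

import tkinter as tk

root = tk.Tk()
entry = tk.Entry(root)
entry.pack(fill="x")
box = tk.Frame(root)
box.pack(fill="both", expand=True)

modules = ["chat", "chat_stream", "memory_system", "moods", "schedule"]
checkboxes = {}
for name in modules:
    cb = tk.Checkbutton(box, text=name, anchor="w")
    cb.select()
    cb.pack(fill="x")
    checkboxes[name] = cb


def apply_filter(_event=None):
    needle = entry.get().strip().lower()
    for name, cb in checkboxes.items():
        # pack() re-shows a match; pack_forget() hides everything else.
        if needle in name.lower():
            cb.pack(fill="x")
        else:
            cb.pack_forget()


entry.bind("<KeyRelease>", apply_filter)  # assumed event; the patch's own bind string is blank here
root.mainloop()
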
add_checkbox(self, parent, text, type_): +# """在指定父组件中添加复选框""" - def update_filter(): - current = cb.get() - if type_ == "level": - (self.selected_levels.add if current else self.selected_levels.discard)(text) - else: - (self.selected_modules.add if current else self.selected_modules.discard)(text) - self.refresh_logs() +# def update_filter(): +# current = cb.get() +# if type_ == "level": +# (self.selected_levels.add if current else self.selected_levels.discard)(text) +# else: +# (self.selected_modules.add if current else self.selected_modules.discard)(text) +# self.refresh_logs() - cb = ctk.CTkCheckBox(parent, text=text, command=update_filter) - cb.select() # 初始选中 +# cb = ctk.CTkCheckBox(parent, text=text, command=update_filter) +# cb.select() # 初始选中 - # 手动同步初始状态到集合(关键修复) - if type_ == "level": - self.selected_levels.add(text) - else: - self.selected_modules.add(text) +# # 手动同步初始状态到集合(关键修复) +# if type_ == "level": +# self.selected_levels.add(text) +# else: +# self.selected_modules.add(text) - if type_ == "module": - self.module_checkboxes[text] = cb - cb.pack(anchor="w", padx=5, pady=2) - return cb +# if type_ == "module": +# self.module_checkboxes[text] = cb +# cb.pack(anchor="w", padx=5, pady=2) +# return cb - def check_filter(self, entry): - """检查日志条目是否符合当前筛选条件""" - level_ok = not self.selected_levels or entry["level"] in self.selected_levels - module_key = self.get_module_key(entry["module"]) - module_ok = not self.selected_modules or module_key in self.selected_modules - return level_ok and module_ok +# def check_filter(self, entry): +# """检查日志条目是否符合当前筛选条件""" +# level_ok = not self.selected_levels or entry["level"] in self.selected_levels +# module_key = self.get_module_key(entry["module"]) +# module_ok = not self.selected_modules or module_key in self.selected_modules +# return level_ok and module_ok - def init_text_tags(self): - """初始化日志文本的颜色标签""" - for tag, color in self.color_config.items(): - self.log_text.tag_config(tag, foreground=color) - self.log_text.tag_config("default", foreground=self.color_config["default"]) +# def init_text_tags(self): +# """初始化日志文本的颜色标签""" +# for tag, color in self.color_config.items(): +# self.log_text.tag_config(tag, foreground=color) +# self.log_text.tag_config("default", foreground=self.color_config["default"]) - def start_process(self): - """启动日志进程并开始读取输出""" - self.process = subprocess.Popen( - ["nb", "run"], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - bufsize=1, - encoding="utf-8", - errors="ignore", - ) - self.start_btn.configure(state="disabled") - self.stop_btn.configure(state="normal") - threading.Thread(target=self.read_output, daemon=True).start() +# def start_process(self): +# """启动日志进程并开始读取输出""" +# self.process = subprocess.Popen( +# ["nb", "run"], +# stdout=subprocess.PIPE, +# stderr=subprocess.STDOUT, +# text=True, +# bufsize=1, +# encoding="utf-8", +# errors="ignore", +# ) +# self.start_btn.configure(state="disabled") +# self.stop_btn.configure(state="normal") +# threading.Thread(target=self.read_output, daemon=True).start() - def stop_process(self): - """停止日志进程并清理相关资源""" - if self.process: - try: - if hasattr(self.process, "pid"): - if os.name == "nt": - subprocess.run( - ["taskkill", "/F", "/T", "/PID", str(self.process.pid)], check=True, capture_output=True - ) - else: - os.killpg(os.getpgid(self.process.pid), signal.SIGTERM) - except (subprocess.CalledProcessError, ProcessLookupError, OSError) as e: - print(f"终止进程失败: {e}") - finally: - self.process = None - self.log_queue.queue.clear() - 
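stop_process above ends the `nb run` child with `taskkill /F /T /PID` on Windows and with os.killpg plus SIGTERM elsewhere. A self-contained sketch of that cross-platform pattern follows; note that killpg(getpgid(pid)) only targets the child's tree if the child was launched in its own session, and without that (as in the commented-out Popen call) the signal would reach the viewer's own process group on POSIX, so the sketch passes start_new_session=True.

import os
import signal
import subprocess
import sys


def spawn(cmd):
    # On POSIX, give the child its own session/process group so that killpg()
    # later reaches the child and its children, but not this process.
    if os.name == "nt":
        return subprocess.Popen(cmd)
    return subprocess.Popen(cmd, start_new_session=True)


def terminate_tree(proc):
    """Best-effort, cross-platform termination of proc and its descendants."""
    if proc is None or proc.poll() is not None:
        return
    try:
        if os.name == "nt":
            subprocess.run(
                ["taskkill", "/F", "/T", "/PID", str(proc.pid)],
                check=True,
                capture_output=True,
            )
        else:
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
    except (subprocess.CalledProcessError, ProcessLookupError, OSError) as exc:
        print(f"failed to terminate process tree: {exc}")


if __name__ == "__main__":
    child = spawn([sys.executable, "-c", "import time; time.sleep(60)"])
    terminate_tree(child)
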
self.start_btn.configure(state="normal") - self.stop_btn.configure(state="disabled") - self.refresh_logs() +# def stop_process(self): +# """停止日志进程并清理相关资源""" +# if self.process: +# try: +# if hasattr(self.process, "pid"): +# if os.name == "nt": +# subprocess.run( +# ["taskkill", "/F", "/T", "/PID", str(self.process.pid)], check=True, capture_output=True +# ) +# else: +# os.killpg(os.getpgid(self.process.pid), signal.SIGTERM) +# except (subprocess.CalledProcessError, ProcessLookupError, OSError) as e: +# print(f"终止进程失败: {e}") +# finally: +# self.process = None +# self.log_queue.queue.clear() +# self.start_btn.configure(state="normal") +# self.stop_btn.configure(state="disabled") +# self.refresh_logs() - def read_output(self): - """读取日志进程的输出并放入队列""" - try: - while self.process and self.process.poll() is None: - line = self.process.stdout.readline() - if line: - self.log_queue.put(line) - else: - break # 避免空循环 - self.process.stdout.close() # 确保关闭文件描述符 - except ValueError: # 处理可能的I/O操作异常 - pass +# def read_output(self): +# """读取日志进程的输出并放入队列""" +# try: +# while self.process and self.process.poll() is None and self.is_running: +# line = self.process.stdout.readline() +# if line: +# self.log_queue.put(line) +# else: +# break # 避免空循环 +# self.process.stdout.close() # 确保关闭文件描述符 +# except ValueError: # 处理可能的I/O操作异常 +# pass - def process_log_queue(self): - """处理日志队列中的日志条目""" - while not self.log_queue.empty(): - line = self.log_queue.get() - self.process_log_line(line) - self.after(100, self.process_log_queue) +# def process_log_queue(self): +# """处理日志队列中的日志条目""" +# while not self.log_queue.empty(): +# line = self.log_queue.get() +# self.process_log_line(line) + +# # 仅在GUI仍在运行时继续处理队列 +# if self.is_running: +# self.after(100, self.process_log_queue) - def process_log_line(self, line): - """解析单行日志并更新日志数据和筛选器""" - match = re.match( - r"""^ - (?:(?P
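read_output and process_log_queue above split the work in two: a daemon thread does the blocking readline() calls and pushes lines onto a Queue, while the Tk event loop drains that queue every 100 ms, and both halves check is_running so they stop after the window closes. A condensed, runnable sketch of that producer/consumer pairing follows; the demo command is a stand-in for the original `nb run`.

import queue
import subprocess
import sys
import threading
import tkinter as tk


class LogPump(tk.Tk):
    """Reader thread fills a queue; the Tk after() loop drains it every 100 ms."""

    def __init__(self, cmd):
        super().__init__()
        self.is_running = True
        self.text = tk.Text(self)
        self.text.pack(fill="both", expand=True)
        self.protocol("WM_DELETE_WINDOW", self._on_closing)

        self.log_queue = queue.Queue()
        self.proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            bufsize=1,
            encoding="utf-8",
            errors="ignore",
        )
        threading.Thread(target=self._read_output, daemon=True).start()
        self.after(100, self._drain_queue)

    def _read_output(self):
        # Blocking reads stay off the GUI thread; only the queue crosses over.
        for line in self.proc.stdout:
            if not self.is_running:
                break
            self.log_queue.put(line)
        self.proc.stdout.close()

    def _drain_queue(self):
        while not self.log_queue.empty():
            self.text.insert("end", self.log_queue.get())
        if self.is_running:                    # stop rescheduling once the window is gone
            self.after(100, self._drain_queue)

    def _on_closing(self):
        self.is_running = False
        if self.proc.poll() is None:
            self.proc.terminate()              # best effort; see the process-tree sketch above
        self.destroy()


if __name__ == "__main__":
    demo = [sys.executable, "-u", "-c",
            "import time\nfor i in range(5):\n    print('tick', i)\n    time.sleep(0.5)"]
    LogPump(demo).mainloop()
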