From cc190ac2b97f6e6497adb0f284ac95e4c4e0687e Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 8 Apr 2025 00:23:08 +0800
Subject: [PATCH 01/13] =?UTF-8?q?better=EF=BC=9A=E5=B0=9D=E8=AF=95?=
=?UTF-8?q?=E9=87=8D=E6=9E=84pfc?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/PFC/chat_observer.py | 159 ++++++---
src/plugins/PFC/chat_states.py | 267 ++++++++++++++
src/plugins/PFC/message_storage.py | 134 ++++++++
src/plugins/PFC/pfc.py | 536 +++++++++++++++--------------
src/plugins/config/config.py | 7 +-
template/bot_config_template.toml | 2 +-
6 files changed, 797 insertions(+), 308 deletions(-)
create mode 100644 src/plugins/PFC/chat_states.py
create mode 100644 src/plugins/PFC/message_storage.py
diff --git a/src/plugins/PFC/chat_observer.py b/src/plugins/PFC/chat_observer.py
index 532afc9db..62ce6d7f9 100644
--- a/src/plugins/PFC/chat_observer.py
+++ b/src/plugins/PFC/chat_observer.py
@@ -5,6 +5,8 @@ from src.common.logger import get_module_logger
from src.common.database import db
from ..message.message_base import UserInfo
from ..config.config import global_config
+from .chat_states import NotificationManager, create_new_message_notification, create_cold_chat_notification
+from .message_storage import MessageStorage, MongoDBMessageStorage
logger = get_module_logger("chat_observer")
@@ -15,36 +17,40 @@ class ChatObserver:
_instances: Dict[str, 'ChatObserver'] = {}
@classmethod
- def get_instance(cls, stream_id: str) -> 'ChatObserver':
+ def get_instance(cls, stream_id: str, message_storage: Optional[MessageStorage] = None) -> 'ChatObserver':
"""获取或创建观察器实例
Args:
stream_id: 聊天流ID
+ message_storage: 消息存储实现,如果为None则使用MongoDB实现
Returns:
ChatObserver: 观察器实例
"""
if stream_id not in cls._instances:
- cls._instances[stream_id] = cls(stream_id)
+ cls._instances[stream_id] = cls(stream_id, message_storage)
return cls._instances[stream_id]
- def __init__(self, stream_id: str):
+ def __init__(self, stream_id: str, message_storage: Optional[MessageStorage] = None):
"""初始化观察器
Args:
stream_id: 聊天流ID
+ message_storage: 消息存储实现,如果为None则使用MongoDB实现
"""
if stream_id in self._instances:
raise RuntimeError(f"ChatObserver for {stream_id} already exists. Use get_instance() instead.")
self.stream_id = stream_id
+ self.message_storage = message_storage or MongoDBMessageStorage()
+
self.last_user_speak_time: Optional[float] = None # 对方上次发言时间
self.last_bot_speak_time: Optional[float] = None # 机器人上次发言时间
self.last_check_time: float = time.time() # 上次查看聊天记录时间
self.last_message_read: Optional[str] = None # 最后读取的消息ID
self.last_message_time: Optional[float] = None # 最后一条消息的时间戳
- self.waiting_start_time: Optional[float] = None # 等待开始时间
+ self.waiting_start_time: float = time.time() # 等待开始时间,初始化为当前时间
# 消息历史记录
self.message_history: List[Dict[str, Any]] = [] # 所有消息历史
@@ -56,8 +62,16 @@ class ChatObserver:
self._task: Optional[asyncio.Task] = None
self._update_event = asyncio.Event() # 触发更新的事件
self._update_complete = asyncio.Event() # 更新完成的事件
+
+ # 通知管理器
+ self.notification_manager = NotificationManager()
+
+ # 冷场检查配置
+ self.cold_chat_threshold: float = 60.0 # 60秒无消息判定为冷场
+ self.last_cold_chat_check: float = time.time()
+ self.is_cold_chat_state: bool = False
- def check(self) -> bool:
+ async def check(self) -> bool:
"""检查距离上一次观察之后是否有了新消息
Returns:
@@ -65,13 +79,10 @@ class ChatObserver:
"""
logger.debug(f"检查距离上一次观察之后是否有了新消息: {self.last_check_time}")
- query = {
- "chat_id": self.stream_id,
- "time": {"$gt": self.last_check_time}
- }
-
- # 只需要查询是否存在,不需要获取具体消息
- new_message_exists = db.messages.find_one(query) is not None
+ new_message_exists = await self.message_storage.has_new_messages(
+ self.stream_id,
+ self.last_check_time
+ )
if new_message_exists:
logger.debug("发现新消息")
@@ -79,27 +90,8 @@ class ChatObserver:
return new_message_exists
- def get_new_message(self) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
- """获取上一次观察的时间点后的新消息,插入到历史记录中,并返回新消息和历史记录两个对象"""
- messages = self.get_message_history(self.last_check_time)
- for message in messages:
- self._add_message_to_history(message)
- return messages, self.message_history
-
- def new_message_after(self, time_point: float) -> bool:
- """判断是否在指定时间点后有新消息
-
- Args:
- time_point: 时间戳
-
- Returns:
- bool: 是否有新消息
- """
- logger.debug(f"判断是否在指定时间点后有新消息: {self.last_message_time} > {time_point}")
- return self.last_message_time is None or self.last_message_time > time_point
-
- def _add_message_to_history(self, message: Dict[str, Any]):
- """添加消息到历史记录
+ async def _add_message_to_history(self, message: Dict[str, Any]):
+ """添加消息到历史记录并发送通知
Args:
message: 消息数据
@@ -116,6 +108,75 @@ class ChatObserver:
else:
self.last_user_speak_time = message["time"]
+ # 发送新消息通知
+ notification = create_new_message_notification(
+ sender="chat_observer",
+ target="pfc",
+ message=message
+ )
+ await self.notification_manager.send_notification(notification)
+
+ # 检查并更新冷场状态
+ await self._check_cold_chat()
+
+ async def _check_cold_chat(self):
+ """检查是否处于冷场状态并发送通知"""
+ current_time = time.time()
+
+ # 每10秒检查一次冷场状态
+ if current_time - self.last_cold_chat_check < 10:
+ return
+
+ self.last_cold_chat_check = current_time
+
+ # 判断是否冷场
+ is_cold = False
+ if self.last_message_time is None:
+ is_cold = True
+ else:
+ is_cold = (current_time - self.last_message_time) > self.cold_chat_threshold
+
+ # 如果冷场状态发生变化,发送通知
+ if is_cold != self.is_cold_chat_state:
+ self.is_cold_chat_state = is_cold
+ notification = create_cold_chat_notification(
+ sender="chat_observer",
+ target="pfc",
+ is_cold=is_cold
+ )
+ await self.notification_manager.send_notification(notification)
+
+ async def get_new_message(self) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
+ """获取上一次观察的时间点后的新消息,插入到历史记录中,并返回新消息和历史记录两个对象"""
+ messages = await self.message_storage.get_messages_after(
+ self.stream_id,
+ self.last_message_read
+ )
+ for message in messages:
+ await self._add_message_to_history(message)
+ return messages, self.message_history
+
+ def new_message_after(self, time_point: float) -> bool:
+ """判断是否在指定时间点后有新消息
+
+ Args:
+ time_point: 时间戳
+
+ Returns:
+ bool: 是否有新消息
+ """
+ if time_point is None:
+ logger.warning("time_point 为 None,返回 False")
+ return False
+
+ if self.last_message_time is None:
+ logger.debug("没有最后消息时间,返回 False")
+ return False
+
+ has_new = self.last_message_time > time_point
+ logger.debug(f"判断是否在指定时间点后有新消息: {self.last_message_time} > {time_point} = {has_new}")
+ return has_new
+
def get_message_history(
self,
start_time: Optional[float] = None,
@@ -159,15 +220,9 @@ class ChatObserver:
Returns:
List[Dict[str, Any]]: 新消息列表
"""
- query = {"chat_id": self.stream_id}
- if self.last_message_read:
- # 获取ID大于last_message_read的消息
- last_message = db.messages.find_one({"message_id": self.last_message_read})
- if last_message:
- query["time"] = {"$gt": last_message["time"]}
-
- new_messages = list(
- db.messages.find(query).sort("time", 1)
+ new_messages = await self.message_storage.get_messages_after(
+ self.stream_id,
+ self.last_message_read
)
if new_messages:
@@ -184,30 +239,24 @@ class ChatObserver:
Returns:
List[Dict[str, Any]]: 最多5条消息
"""
- query = {
- "chat_id": self.stream_id,
- "time": {"$lt": time_point}
- }
-
- new_messages = list(
- db.messages.find(query).sort("time", -1).limit(5) # 倒序获取5条
+ new_messages = await self.message_storage.get_messages_before(
+ self.stream_id,
+ time_point
)
- # 将消息按时间正序排列
- new_messages.reverse()
-
if new_messages:
self.last_message_read = new_messages[-1]["message_id"]
return new_messages
-
+
+    # 主要观察循环
async def _update_loop(self):
"""更新循环"""
try:
start_time = time.time()
messages = await self._fetch_new_messages_before(start_time)
for message in messages:
- self._add_message_to_history(message)
+ await self._add_message_to_history(message)
except Exception as e:
logger.error(f"缓冲消息出错: {e}")
@@ -228,7 +277,7 @@ class ChatObserver:
if new_messages:
# 处理新消息
for message in new_messages:
- self._add_message_to_history(message)
+ await self._add_message_to_history(message)
# 设置完成事件
self._update_complete.set()
diff --git a/src/plugins/PFC/chat_states.py b/src/plugins/PFC/chat_states.py
new file mode 100644
index 000000000..bb7cfc4a6
--- /dev/null
+++ b/src/plugins/PFC/chat_states.py
@@ -0,0 +1,267 @@
+from enum import Enum, auto
+from typing import Optional, Dict, Any, List, Set
+from dataclasses import dataclass
+from datetime import datetime
+from abc import ABC, abstractmethod
+
+class ChatState(Enum):
+ """聊天状态枚举"""
+ NORMAL = auto() # 正常状态
+ NEW_MESSAGE = auto() # 有新消息
+ COLD_CHAT = auto() # 冷场状态
+ ACTIVE_CHAT = auto() # 活跃状态
+ BOT_SPEAKING = auto() # 机器人正在说话
+ USER_SPEAKING = auto() # 用户正在说话
+ SILENT = auto() # 沉默状态
+ ERROR = auto() # 错误状态
+
+class NotificationType(Enum):
+ """通知类型枚举"""
+ NEW_MESSAGE = auto() # 新消息通知
+ COLD_CHAT = auto() # 冷场通知
+ ACTIVE_CHAT = auto() # 活跃通知
+ BOT_SPEAKING = auto() # 机器人说话通知
+ USER_SPEAKING = auto() # 用户说话通知
+ MESSAGE_DELETED = auto() # 消息删除通知
+ USER_JOINED = auto() # 用户加入通知
+ USER_LEFT = auto() # 用户离开通知
+ ERROR = auto() # 错误通知
+
+@dataclass
+class ChatStateInfo:
+ """聊天状态信息"""
+ state: ChatState
+ last_message_time: Optional[float] = None
+ last_message_content: Optional[str] = None
+ last_speaker: Optional[str] = None
+ message_count: int = 0
+ cold_duration: float = 0.0 # 冷场持续时间(秒)
+ active_duration: float = 0.0 # 活跃持续时间(秒)
+
+@dataclass
+class Notification:
+ """通知基类"""
+ type: NotificationType
+ timestamp: float
+ sender: str # 发送者标识
+ target: str # 接收者标识
+ data: Dict[str, Any]
+
+ def to_dict(self) -> Dict[str, Any]:
+ """转换为字典格式"""
+ return {
+ "type": self.type.name,
+ "timestamp": self.timestamp,
+ "data": self.data
+ }
+
+@dataclass
+class StateNotification(Notification):
+ """持续状态通知"""
+ is_active: bool = True
+
+ def to_dict(self) -> Dict[str, Any]:
+ base_dict = super().to_dict()
+ base_dict["is_active"] = self.is_active
+ return base_dict
+
+class NotificationHandler(ABC):
+ """通知处理器接口"""
+
+ @abstractmethod
+ async def handle_notification(self, notification: Notification):
+ """处理通知"""
+ pass
+
+class NotificationManager:
+ """通知管理器"""
+
+ def __init__(self):
+ # 按接收者和通知类型存储处理器
+ self._handlers: Dict[str, Dict[NotificationType, List[NotificationHandler]]] = {}
+ self._active_states: Set[NotificationType] = set()
+ self._notification_history: List[Notification] = []
+
+ def register_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
+ """注册通知处理器
+
+ Args:
+ target: 接收者标识(例如:"pfc")
+ notification_type: 要处理的通知类型
+ handler: 处理器实例
+ """
+ if target not in self._handlers:
+ self._handlers[target] = {}
+ if notification_type not in self._handlers[target]:
+ self._handlers[target][notification_type] = []
+ self._handlers[target][notification_type].append(handler)
+
+ def unregister_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
+ """注销通知处理器
+
+ Args:
+ target: 接收者标识
+ notification_type: 通知类型
+ handler: 要注销的处理器实例
+ """
+ if target in self._handlers and notification_type in self._handlers[target]:
+ handlers = self._handlers[target][notification_type]
+ if handler in handlers:
+ handlers.remove(handler)
+ # 如果该类型的处理器列表为空,删除该类型
+ if not handlers:
+ del self._handlers[target][notification_type]
+ # 如果该目标没有任何处理器,删除该目标
+ if not self._handlers[target]:
+ del self._handlers[target]
+
+ async def send_notification(self, notification: Notification):
+ """发送通知"""
+ self._notification_history.append(notification)
+
+ # 如果是状态通知,更新活跃状态
+ if isinstance(notification, StateNotification):
+ if notification.is_active:
+ self._active_states.add(notification.type)
+ else:
+ self._active_states.discard(notification.type)
+
+ # 调用目标接收者的处理器
+ target = notification.target
+ if target in self._handlers:
+ handlers = self._handlers[target].get(notification.type, [])
+ for handler in handlers:
+ await handler.handle_notification(notification)
+
+ def get_active_states(self) -> Set[NotificationType]:
+ """获取当前活跃的状态"""
+ return self._active_states.copy()
+
+ def is_state_active(self, state_type: NotificationType) -> bool:
+ """检查特定状态是否活跃"""
+ return state_type in self._active_states
+
+ def get_notification_history(self,
+ sender: Optional[str] = None,
+ target: Optional[str] = None,
+ limit: Optional[int] = None) -> List[Notification]:
+ """获取通知历史
+
+ Args:
+ sender: 过滤特定发送者的通知
+ target: 过滤特定接收者的通知
+ limit: 限制返回数量
+ """
+ history = self._notification_history
+
+ if sender:
+ history = [n for n in history if n.sender == sender]
+ if target:
+ history = [n for n in history if n.target == target]
+
+ if limit is not None:
+ history = history[-limit:]
+
+ return history
+
+# 一些常用的通知创建函数
+def create_new_message_notification(sender: str, target: str, message: Dict[str, Any]) -> Notification:
+ """创建新消息通知"""
+ return Notification(
+ type=NotificationType.NEW_MESSAGE,
+ timestamp=datetime.now().timestamp(),
+ sender=sender,
+ target=target,
+ data={
+ "message_id": message.get("message_id"),
+ "content": message.get("content"),
+ "sender": message.get("sender"),
+ "time": message.get("time")
+ }
+ )
+
+def create_cold_chat_notification(sender: str, target: str, is_cold: bool) -> StateNotification:
+ """创建冷场状态通知"""
+ return StateNotification(
+ type=NotificationType.COLD_CHAT,
+ timestamp=datetime.now().timestamp(),
+ sender=sender,
+ target=target,
+ data={"is_cold": is_cold},
+ is_active=is_cold
+ )
+
+def create_active_chat_notification(sender: str, target: str, is_active: bool) -> StateNotification:
+ """创建活跃状态通知"""
+ return StateNotification(
+ type=NotificationType.ACTIVE_CHAT,
+ timestamp=datetime.now().timestamp(),
+ sender=sender,
+ target=target,
+ data={"is_active": is_active},
+ is_active=is_active
+ )
+
+class ChatStateManager:
+ """聊天状态管理器"""
+
+ def __init__(self):
+ self.current_state = ChatState.NORMAL
+ self.state_info = ChatStateInfo(state=ChatState.NORMAL)
+ self.state_history: list[ChatStateInfo] = []
+
+ def update_state(self, new_state: ChatState, **kwargs):
+ """更新聊天状态
+
+ Args:
+ new_state: 新的状态
+ **kwargs: 其他状态信息
+ """
+ self.current_state = new_state
+ self.state_info.state = new_state
+
+ # 更新其他状态信息
+ for key, value in kwargs.items():
+ if hasattr(self.state_info, key):
+ setattr(self.state_info, key, value)
+
+ # 记录状态历史
+ self.state_history.append(self.state_info)
+
+ def get_current_state_info(self) -> ChatStateInfo:
+ """获取当前状态信息"""
+ return self.state_info
+
+ def get_state_history(self) -> list[ChatStateInfo]:
+ """获取状态历史"""
+ return self.state_history
+
+ def is_cold_chat(self, threshold: float = 60.0) -> bool:
+ """判断是否处于冷场状态
+
+ Args:
+ threshold: 冷场阈值(秒)
+
+ Returns:
+ bool: 是否冷场
+ """
+ if not self.state_info.last_message_time:
+ return True
+
+ current_time = datetime.now().timestamp()
+ return (current_time - self.state_info.last_message_time) > threshold
+
+ def is_active_chat(self, threshold: float = 5.0) -> bool:
+ """判断是否处于活跃状态
+
+ Args:
+ threshold: 活跃阈值(秒)
+
+ Returns:
+ bool: 是否活跃
+ """
+ if not self.state_info.last_message_time:
+ return False
+
+ current_time = datetime.now().timestamp()
+ return (current_time - self.state_info.last_message_time) <= threshold
\ No newline at end of file
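A minimal usage sketch of the notification plumbing introduced above (import path and names are taken from this patch; PrintHandler is a hypothetical stand-in for PFC's real handler):

    import asyncio
    from src.plugins.PFC.chat_states import (
        NotificationManager, NotificationHandler, NotificationType,
        create_new_message_notification,
    )

    class PrintHandler(NotificationHandler):
        async def handle_notification(self, notification):
            # Only receives the (target, type) pairs it was registered for.
            print(notification.type.name, notification.data)

    async def demo():
        manager = NotificationManager()
        manager.register_handler("pfc", NotificationType.NEW_MESSAGE, PrintHandler())
        msg = {"message_id": "m1", "content": "hello", "sender": "u1", "time": 0.0}
        await manager.send_notification(
            create_new_message_notification(sender="chat_observer", target="pfc", message=msg)
        )

    asyncio.run(demo())

StateNotification is routed the same way, except send_notification also records it in the active-state set queried by is_state_active().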
diff --git a/src/plugins/PFC/message_storage.py b/src/plugins/PFC/message_storage.py
new file mode 100644
index 000000000..3c7cab8b3
--- /dev/null
+++ b/src/plugins/PFC/message_storage.py
@@ -0,0 +1,134 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Any, Optional
+from src.common.database import db
+
+class MessageStorage(ABC):
+ """消息存储接口"""
+
+ @abstractmethod
+ async def get_messages_after(self, chat_id: str, message_id: Optional[str] = None) -> List[Dict[str, Any]]:
+ """获取指定消息ID之后的所有消息
+
+ Args:
+ chat_id: 聊天ID
+ message_id: 消息ID,如果为None则获取所有消息
+
+ Returns:
+ List[Dict[str, Any]]: 消息列表
+ """
+ pass
+
+ @abstractmethod
+ async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
+ """获取指定时间点之前的消息
+
+ Args:
+ chat_id: 聊天ID
+ time_point: 时间戳
+ limit: 最大消息数量
+
+ Returns:
+ List[Dict[str, Any]]: 消息列表
+ """
+ pass
+
+ @abstractmethod
+ async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
+ """检查是否有新消息
+
+ Args:
+ chat_id: 聊天ID
+ after_time: 时间戳
+
+ Returns:
+ bool: 是否有新消息
+ """
+ pass
+
+class MongoDBMessageStorage(MessageStorage):
+ """MongoDB消息存储实现"""
+
+ def __init__(self):
+ self.db = db
+
+ async def get_messages_after(self, chat_id: str, message_id: Optional[str] = None) -> List[Dict[str, Any]]:
+ query = {"chat_id": chat_id}
+
+ if message_id:
+ # 获取ID大于message_id的消息
+ last_message = self.db.messages.find_one({"message_id": message_id})
+ if last_message:
+ query["time"] = {"$gt": last_message["time"]}
+
+ return list(
+ self.db.messages.find(query).sort("time", 1)
+ )
+
+ async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
+ query = {
+ "chat_id": chat_id,
+ "time": {"$lt": time_point}
+ }
+
+ messages = list(
+ self.db.messages.find(query).sort("time", -1).limit(limit)
+ )
+
+ # 将消息按时间正序排列
+ messages.reverse()
+ return messages
+
+ async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
+ query = {
+ "chat_id": chat_id,
+ "time": {"$gt": after_time}
+ }
+
+ return self.db.messages.find_one(query) is not None
+
+# # 创建一个内存消息存储实现,用于测试
+# class InMemoryMessageStorage(MessageStorage):
+# """内存消息存储实现,主要用于测试"""
+
+# def __init__(self):
+# self.messages: Dict[str, List[Dict[str, Any]]] = {}
+
+# async def get_messages_after(self, chat_id: str, message_id: Optional[str] = None) -> List[Dict[str, Any]]:
+# if chat_id not in self.messages:
+# return []
+
+# messages = self.messages[chat_id]
+# if not message_id:
+# return messages
+
+# # 找到message_id的索引
+# try:
+# index = next(i for i, m in enumerate(messages) if m["message_id"] == message_id)
+# return messages[index + 1:]
+# except StopIteration:
+# return []
+
+# async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
+# if chat_id not in self.messages:
+# return []
+
+# messages = [
+# m for m in self.messages[chat_id]
+# if m["time"] < time_point
+# ]
+
+# return messages[-limit:]
+
+# async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
+# if chat_id not in self.messages:
+# return False
+
+# return any(m["time"] > after_time for m in self.messages[chat_id])
+
+# # 测试辅助方法
+# def add_message(self, chat_id: str, message: Dict[str, Any]):
+# """添加测试消息"""
+# if chat_id not in self.messages:
+# self.messages[chat_id] = []
+# self.messages[chat_id].append(message)
+# self.messages[chat_id].sort(key=lambda m: m["time"])
\ No newline at end of file
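The commented-out InMemoryMessageStorage sketches the intended use of this abstraction; below is a rough, self-contained list-backed variant for tests (class name and details are hypothetical, only the MessageStorage interface comes from this patch). An instance like this can be injected via ChatObserver.get_instance(stream_id, message_storage=...) as the earlier hunk allows.

    import asyncio
    from typing import Any, Dict, List, Optional
    from src.plugins.PFC.message_storage import MessageStorage

    class ListMessageStorage(MessageStorage):
        def __init__(self, messages: List[Dict[str, Any]]):
            # Kept sorted by time so the "after"/"before" slices stay consistent.
            self.messages = sorted(messages, key=lambda m: m["time"])

        async def get_messages_after(self, chat_id: str, message_id: Optional[str] = None) -> List[Dict[str, Any]]:
            msgs = [m for m in self.messages if m["chat_id"] == chat_id]
            if message_id is None:
                return msgs
            ids = [m["message_id"] for m in msgs]
            start = ids.index(message_id) + 1 if message_id in ids else 0
            return msgs[start:]

        async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
            hits = [m for m in self.messages if m["chat_id"] == chat_id and m["time"] < time_point]
            return hits[-limit:]

        async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
            return any(m["chat_id"] == chat_id and m["time"] > after_time for m in self.messages)

    storage = ListMessageStorage([{"chat_id": "s1", "message_id": "m1", "time": 1.0}])
    print(asyncio.run(storage.has_new_messages("s1", 0.5)))  # True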
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index 446b280c3..46a7fde4e 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -2,7 +2,7 @@
#Prefrontal cortex
import datetime
import asyncio
-from typing import List, Optional, Dict, Any, Tuple, Literal
+from typing import List, Optional, Dict, Any, Tuple, Literal, Set
from enum import Enum
from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream
@@ -19,7 +19,9 @@ from .pfc_KnowledgeFetcher import KnowledgeFetcher
from .reply_checker import ReplyChecker
from .pfc_utils import get_items_from_json
from src.individuality.individuality import Individuality
+from .chat_states import NotificationHandler, Notification, NotificationType
import time
+from dataclasses import dataclass, field
logger = get_module_logger("pfc")
@@ -42,6 +44,99 @@ class ConversationState(Enum):
ActionType = Literal["direct_reply", "fetch_knowledge", "wait"]
+@dataclass
+class DecisionInfo:
+ """决策信息类,用于收集和管理来自chat_observer的通知信息"""
+
+ # 消息相关
+ last_message_time: Optional[float] = None
+ last_message_content: Optional[str] = None
+ last_message_sender: Optional[str] = None
+ new_messages_count: int = 0
+ unprocessed_messages: List[Dict[str, Any]] = field(default_factory=list)
+
+ # 对话状态
+ is_cold_chat: bool = False
+ cold_chat_duration: float = 0.0
+ last_bot_speak_time: Optional[float] = None
+ last_user_speak_time: Optional[float] = None
+
+ # 对话参与者
+ active_users: Set[str] = field(default_factory=set)
+ bot_id: str = field(default="")
+
+ def update_from_message(self, message: Dict[str, Any]):
+ """从消息更新信息
+
+ Args:
+ message: 消息数据
+ """
+ self.last_message_time = message["time"]
+ self.last_message_content = message.get("processed_plain_text", "")
+
+ user_info = UserInfo.from_dict(message.get("user_info", {}))
+ self.last_message_sender = user_info.user_id
+
+ if user_info.user_id == self.bot_id:
+ self.last_bot_speak_time = message["time"]
+ else:
+ self.last_user_speak_time = message["time"]
+ self.active_users.add(user_info.user_id)
+
+ self.new_messages_count += 1
+ self.unprocessed_messages.append(message)
+
+ def update_cold_chat_status(self, is_cold: bool, current_time: float):
+ """更新冷场状态
+
+ Args:
+ is_cold: 是否冷场
+ current_time: 当前时间
+ """
+ self.is_cold_chat = is_cold
+ if is_cold and self.last_message_time:
+ self.cold_chat_duration = current_time - self.last_message_time
+
+ def get_active_duration(self) -> float:
+ """获取当前活跃时长
+
+ Returns:
+ float: 最后一条消息到现在的时长(秒)
+ """
+ if not self.last_message_time:
+ return 0.0
+ return time.time() - self.last_message_time
+
+ def get_user_response_time(self) -> Optional[float]:
+ """获取用户响应时间
+
+ Returns:
+ Optional[float]: 用户最后发言到现在的时长(秒),如果没有用户发言则返回None
+ """
+ if not self.last_user_speak_time:
+ return None
+ return time.time() - self.last_user_speak_time
+
+ def get_bot_response_time(self) -> Optional[float]:
+ """获取机器人响应时间
+
+ Returns:
+ Optional[float]: 机器人最后发言到现在的时长(秒),如果没有机器人发言则返回None
+ """
+ if not self.last_bot_speak_time:
+ return None
+ return time.time() - self.last_bot_speak_time
+
+ def clear_unprocessed_messages(self):
+ """清空未处理消息列表"""
+ self.unprocessed_messages.clear()
+ self.new_messages_count = 0
+
+
+# Type alias so annotations below can refer to DecisionInfo
+DecisionInfoType = DecisionInfo
+
+
class ActionPlanner:
"""行动规划器"""
@@ -62,22 +157,24 @@ class ActionPlanner:
method: str,
reasoning: str,
action_history: List[Dict[str, str]] = None,
- chat_observer: Optional[ChatObserver] = None, # 添加chat_observer参数
+ decision_info: DecisionInfoType = None # Use DecisionInfoType here
) -> Tuple[str, str]:
"""规划下一步行动
Args:
goal: 对话目标
+ method: 实现方法
reasoning: 目标原因
action_history: 行动历史记录
+ decision_info: 决策信息
Returns:
Tuple[str, str]: (行动类型, 行动原因)
"""
# 构建提示词
- # 获取最近20条消息
- self.chat_observer.waiting_start_time = time.time()
+ logger.debug(f"开始规划行动:当前目标: {goal}")
+ # 获取最近20条消息
messages = self.chat_observer.get_message_history(limit=20)
chat_history_text = ""
for msg in messages:
@@ -92,22 +189,42 @@ class ActionPlanner:
# 构建action历史文本
action_history_text = ""
- if action_history:
- if action_history[-1]['action'] == "direct_reply":
- action_history_text = "你刚刚发言回复了对方"
+ if action_history and action_history[-1]['action'] == "direct_reply":
+ action_history_text = "你刚刚发言回复了对方"
+
+ # 构建决策信息文本
+ decision_info_text = ""
+ if decision_info:
+ decision_info_text = "当前对话状态:\n"
+ if decision_info.is_cold_chat:
+ decision_info_text += f"对话处于冷场状态,已持续{int(decision_info.cold_chat_duration)}秒\n"
+
+ if decision_info.new_messages_count > 0:
+ decision_info_text += f"有{decision_info.new_messages_count}条新消息未处理\n"
+
+ user_response_time = decision_info.get_user_response_time()
+ if user_response_time:
+ decision_info_text += f"距离用户上次发言已过去{int(user_response_time)}秒\n"
+
+ bot_response_time = decision_info.get_bot_response_time()
+ if bot_response_time:
+ decision_info_text += f"距离你上次发言已过去{int(bot_response_time)}秒\n"
+
+ if decision_info.active_users:
+ decision_info_text += f"当前活跃用户数: {len(decision_info.active_users)}\n"
- # 获取时间信息
- time_info = self.chat_observer.get_time_info()
+ prompt = f"""{personality_text}。现在你在参与一场QQ聊天,请分析以下内容,根据信息决定下一步行动:
- prompt = f"""现在你在参与一场QQ聊天,请分析以下内容,根据信息决定下一步行动:
-{personality_text}
当前对话目标:{goal}
实现该对话目标的方式:{method}
产生该对话目标的原因:{reasoning}
-{time_info}
+
+{decision_info_text}
+{action_history_text}
+
最近的对话记录:
{chat_history_text}
-{action_history_text}
+
请你接下去想想要你要做什么,可以发言,可以等待,可以倾听,可以调取知识。注意不同行动类型的要求,不要重复发言:
行动类型:
fetch_knowledge: 需要调取知识,当需要专业知识或特定信息时选择
@@ -413,16 +530,23 @@ class Waiter:
Returns:
bool: 是否超时(True表示超时)
"""
- wait_start_time = self.chat_observer.waiting_start_time
- while not self.chat_observer.new_message_after(wait_start_time):
- await asyncio.sleep(1)
- logger.info("等待中...")
- # 检查是否超过60秒
+ # 使用当前时间作为等待开始时间
+ wait_start_time = time.time()
+ self.chat_observer.waiting_start_time = wait_start_time # 设置等待开始时间
+
+ while True:
+ # 检查是否有新消息
+ if self.chat_observer.new_message_after(wait_start_time):
+ logger.info("等待结束,收到新消息")
+ return False
+
+ # 检查是否超时
if time.time() - wait_start_time > 300:
logger.info("等待超过300秒,结束对话")
return True
- logger.info("等待结束")
- return False
+
+ await asyncio.sleep(1)
+ logger.info("等待中...")
class ReplyGenerator:
@@ -519,16 +643,16 @@ class ReplyGenerator:
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.info(f"生成的回复: {content}")
- is_new = self.chat_observer.check()
- logger.debug(f"再看一眼聊天记录,{'有' if is_new else '没有'}新消息")
+ # is_new = self.chat_observer.check()
+ # logger.debug(f"再看一眼聊天记录,{'有' if is_new else '没有'}新消息")
# 如果有新消息,重新生成回复
- if is_new:
- logger.info("检测到新消息,重新生成回复")
- return await self.generate(
- goal, chat_history, knowledge_cache,
- None, retry_count
- )
+ # if is_new:
+ # logger.info("检测到新消息,重新生成回复")
+ # return await self.generate(
+ # goal, chat_history, knowledge_cache,
+ # None, retry_count
+ # )
return content
@@ -555,12 +679,69 @@ class ReplyGenerator:
return await self.reply_checker.check(reply, goal, retry_count)
+class PFCNotificationHandler(NotificationHandler):
+ """PFC的通知处理器"""
+
+ def __init__(self, conversation: 'Conversation'):
+ self.conversation = conversation
+ self.logger = get_module_logger("pfc_notification")
+ self.decision_info = conversation.decision_info
+
+ async def handle_notification(self, notification: Notification):
+ """处理通知"""
+ try:
+ if not notification or not hasattr(notification, 'data') or notification.data is None:
+ self.logger.error("收到无效的通知:notification 或 data 为空")
+ return
+
+ if notification.type == NotificationType.NEW_MESSAGE:
+ # 处理新消息通知
+ message = notification.data
+ if not isinstance(message, dict):
+ self.logger.error(f"无效的消息格式: {type(message)}")
+ return
+
+ content = message.get('content', '')
+ self.logger.info(f"收到新消息通知: {content[:30] if content else ''}...")
+
+ # 更新决策信息
+ try:
+ self.decision_info.update_from_message(message)
+ except Exception as e:
+ self.logger.error(f"更新决策信息失败: {e}")
+ return
+
+ # 触发对话系统更新
+ self.conversation.chat_observer.trigger_update()
+
+ elif notification.type == NotificationType.COLD_CHAT:
+ # 处理冷场通知
+ try:
+ is_cold = bool(notification.data.get("is_cold", False))
+ # 更新决策信息
+ self.decision_info.update_cold_chat_status(is_cold, time.time())
+
+ if is_cold:
+ self.logger.info("检测到对话冷场")
+ else:
+ self.logger.info("对话恢复活跃")
+ except Exception as e:
+ self.logger.error(f"处理冷场状态失败: {e}")
+ return
+
+ except Exception as e:
+ self.logger.error(f"处理通知时出错: {str(e)}")
+ # 添加更详细的错误信息
+ self.logger.error(f"通知类型: {getattr(notification, 'type', None)}")
+ self.logger.error(f"通知数据: {getattr(notification, 'data', None)}")
+
+
class Conversation:
# 类级别的实例管理
_instances: Dict[str, 'Conversation'] = {}
- _instance_lock = asyncio.Lock() # 类级别的全局锁
- _init_events: Dict[str, asyncio.Event] = {} # 初始化完成事件
- _initializing: Dict[str, bool] = {} # 标记是否正在初始化
+ _instance_lock = asyncio.Lock()
+ _init_events: Dict[str, asyncio.Event] = {}
+ _initializing: Dict[str, bool] = {}
@classmethod
async def get_instance(cls, stream_id: str) -> Optional['Conversation']:
@@ -573,102 +754,89 @@ class Conversation:
Optional[Conversation]: 对话实例,如果创建或等待失败则返回None
"""
try:
- # 使用全局锁来确保线程安全
- async with cls._instance_lock:
- # 如果已经在初始化中,等待初始化完成
- if stream_id in cls._initializing and cls._initializing[stream_id]:
- # 释放锁等待初始化
- cls._instance_lock.release()
- try:
- await asyncio.wait_for(cls._init_events[stream_id].wait(), timeout=5.0)
- except asyncio.TimeoutError:
- logger.error(f"等待实例 {stream_id} 初始化超时")
- return None
- finally:
- await cls._instance_lock.acquire()
-
- # 如果实例不存在,创建新实例
- if stream_id not in cls._instances:
- cls._instances[stream_id] = cls(stream_id)
- cls._init_events[stream_id] = asyncio.Event()
- cls._initializing[stream_id] = True
- logger.info(f"创建新的对话实例: {stream_id}")
-
+ # 检查是否已经有实例
+ if stream_id in cls._instances:
return cls._instances[stream_id]
+
+ async with cls._instance_lock:
+ # 再次检查,防止在获取锁的过程中其他线程创建了实例
+ if stream_id in cls._instances:
+ return cls._instances[stream_id]
+
+ # 如果正在初始化,等待初始化完成
+ if stream_id in cls._initializing and cls._initializing[stream_id]:
+ event = cls._init_events.get(stream_id)
+ if event:
+ try:
+ # 在等待之前释放锁
+ cls._instance_lock.release()
+ await asyncio.wait_for(event.wait(), timeout=10.0) # 增加超时时间到10秒
+ # 重新获取锁
+ await cls._instance_lock.acquire()
+ if stream_id in cls._instances:
+ return cls._instances[stream_id]
+ except asyncio.TimeoutError:
+ logger.error(f"等待实例 {stream_id} 初始化超时")
+ # 清理超时的初始化状态
+ cls._initializing[stream_id] = False
+ if stream_id in cls._init_events:
+ del cls._init_events[stream_id]
+ return None
+
+ # 创建新实例
+ logger.info(f"创建新的对话实例: {stream_id}")
+ cls._initializing[stream_id] = True
+ cls._init_events[stream_id] = asyncio.Event()
+
+ # 在锁保护下创建实例
+ instance = cls(stream_id)
+ cls._instances[stream_id] = instance
+
+ # 启动实例初始化(在后台运行)
+ asyncio.create_task(instance._initialize())
+
+ return instance
+
except Exception as e:
logger.error(f"获取对话实例失败: {e}")
return None
+
+ async def _initialize(self):
+ """初始化实例(在后台运行)"""
+ try:
+ logger.info(f"开始初始化对话实例: {self.stream_id}")
+ self.chat_observer.start() # 启动观察器
+ await asyncio.sleep(1) # 给观察器一些启动时间
+
+ # 获取初始目标
+ self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
+
+ # 标记初始化完成
+ self.__class__._initializing[self.stream_id] = False
+ if self.stream_id in self.__class__._init_events:
+ self.__class__._init_events[self.stream_id].set()
+
+ # 启动对话循环
+ asyncio.create_task(self._conversation_loop())
+
+ except Exception as e:
+ logger.error(f"初始化对话实例失败: {e}")
+ # 清理失败的初始化
+ self.__class__._initializing[self.stream_id] = False
+ if self.stream_id in self.__class__._init_events:
+ self.__class__._init_events[self.stream_id].set()
+ if self.stream_id in self.__class__._instances:
+ del self.__class__._instances[self.stream_id]
- @classmethod
- async def remove_instance(cls, stream_id: str):
- """删除对话实例
-
- Args:
- stream_id: 聊天流ID
- """
- async with cls._instance_lock:
- if stream_id in cls._instances:
- # 停止相关组件
- instance = cls._instances[stream_id]
- instance.chat_observer.stop()
- # 删除实例
- del cls._instances[stream_id]
- if stream_id in cls._init_events:
- del cls._init_events[stream_id]
- if stream_id in cls._initializing:
- del cls._initializing[stream_id]
- logger.info(f"已删除对话实例 {stream_id}")
-
- def __init__(self, stream_id: str):
- """初始化对话系统"""
- self.stream_id = stream_id
- self.state = ConversationState.INIT
- self.current_goal: Optional[str] = None
- self.current_method: Optional[str] = None
- self.goal_reasoning: Optional[str] = None
- self.generated_reply: Optional[str] = None
- self.should_continue = True
-
- # 初始化聊天观察器
- self.chat_observer = ChatObserver.get_instance(stream_id)
-
- # 添加action历史记录
- self.action_history: List[Dict[str, str]] = []
-
- # 知识缓存
- self.knowledge_cache: Dict[str, str] = {} # 确保初始化为字典
-
- # 初始化各个组件
- self.goal_analyzer = GoalAnalyzer(self.stream_id)
- self.action_planner = ActionPlanner(self.stream_id)
- self.reply_generator = ReplyGenerator(self.stream_id)
- self.knowledge_fetcher = KnowledgeFetcher()
- self.direct_sender = DirectMessageSender()
- self.waiter = Waiter(self.stream_id)
-
- # 创建聊天流
- self.chat_stream = chat_manager.get_stream(self.stream_id)
-
- def _clear_knowledge_cache(self):
- """清空知识缓存"""
- self.knowledge_cache.clear() # 使用clear方法清空字典
-
async def start(self):
"""开始对话流程"""
try:
logger.info("对话系统启动")
self.should_continue = True
- self.chat_observer.start() # 启动观察器
- await asyncio.sleep(1)
- # 启动对话循环
await self._conversation_loop()
except Exception as e:
logger.error(f"启动对话系统失败: {e}")
raise
- finally:
- # 标记初始化完成
- self._init_events[self.stream_id].set()
- self._initializing[self.stream_id] = False
async def _conversation_loop(self):
"""对话循环"""
@@ -681,17 +849,21 @@ class Conversation:
if not await self.chat_observer.wait_for_update():
logger.warning("等待消息更新超时")
+ # 使用决策信息来辅助行动规划
action, reason = await self.action_planner.plan(
self.current_goal,
self.current_method,
self.goal_reasoning,
- self.action_history, # 传入action历史
- self.chat_observer # 传入chat_observer
+ self.action_history,
+ self.decision_info # 传入决策信息
)
# 执行行动
await self._handle_action(action, reason)
+ # 清理已处理的消息
+ self.decision_info.clear_unprocessed_messages()
+
def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
"""将消息字典转换为Message对象"""
try:
@@ -742,87 +914,6 @@ class Conversation:
self.current_goal
)
- if not is_suitable:
- logger.warning(f"生成的回复不合适,原因: {reason}")
- if need_replan:
- # 尝试切换到其他备选目标
- alternative_goals = await self.goal_analyzer.get_alternative_goals()
- if alternative_goals:
- # 有备选目标,尝试使用下一个目标
- self.current_goal, self.current_method, self.goal_reasoning = alternative_goals[0]
- logger.info(f"切换到备选目标: {self.current_goal}")
- # 使用新目标生成回复
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache
- )
- # 检查使用新目标生成的回复是否合适
- is_suitable, reason, _ = await self.reply_generator.check_reply(
- self.generated_reply,
- self.current_goal
- )
- if is_suitable:
- # 如果新目标的回复合适,调整目标优先级
- await self.goal_analyzer._update_goals(
- self.current_goal,
- self.current_method,
- self.goal_reasoning
- )
- else:
- # 如果新目标还是不合适,重新思考目标
- self.state = ConversationState.RETHINKING
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
- return
- else:
- # 没有备选目标,重新分析
- self.state = ConversationState.RETHINKING
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
- return
- else:
- # 重新生成回复
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache,
- self.generated_reply # 将不合适的回复作为previous_reply传入
- )
-
- while self.chat_observer.check():
- if not is_suitable:
- logger.warning(f"生成的回复不合适,原因: {reason}")
- if need_replan:
- # 尝试切换到其他备选目标
- alternative_goals = await self.goal_analyzer.get_alternative_goals()
- if alternative_goals:
- # 有备选目标,尝试使用下一个目标
- self.current_goal, self.current_method, self.goal_reasoning = alternative_goals[0]
- logger.info(f"切换到备选目标: {self.current_goal}")
- # 使用新目标生成回复
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache
- )
- is_suitable = True # 假设使用新目标后回复是合适的
- else:
- # 没有备选目标,重新分析
- self.state = ConversationState.RETHINKING
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
- return
- else:
- # 重新生成回复
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache,
- self.generated_reply # 将不合适的回复作为previous_reply传入
- )
-
await self._send_reply()
elif action == "fetch_knowledge":
@@ -836,59 +927,6 @@ class Conversation:
if knowledge != "未找到相关知识":
self.knowledge_cache[sources] = knowledge
-
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache
- )
-
- # 检查回复是否合适
- is_suitable, reason, need_replan = await self.reply_generator.check_reply(
- self.generated_reply,
- self.current_goal
- )
-
- if not is_suitable:
- logger.warning(f"生成的回复不合适,原因: {reason}")
- if need_replan:
- # 尝试切换到其他备选目标
- alternative_goals = await self.goal_analyzer.get_alternative_goals()
- if alternative_goals:
- # 有备选目标,尝试使用
- self.current_goal, self.current_method, self.goal_reasoning = alternative_goals[0]
- logger.info(f"切换到备选目标: {self.current_goal}")
- # 使用新目标获取知识并生成回复
- knowledge, sources = await self.knowledge_fetcher.fetch(
- self.current_goal,
- [self._convert_to_message(msg) for msg in messages]
- )
- if knowledge != "未找到相关知识":
- self.knowledge_cache[sources] = knowledge
-
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache
- )
- else:
- # 没有备选目标,重新分析
- self.state = ConversationState.RETHINKING
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
- return
- else:
- # 重新生成回复
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache,
- self.generated_reply # 将不合适的回复作为previous_reply传入
- )
-
- await self._send_reply()
elif action == "rethink_goal":
self.state = ConversationState.RETHINKING
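For reference, a small sketch of how the new DecisionInfo accumulates the state the planner reads (values are made up; it assumes UserInfo.from_dict accepts the user_id/user_nickname keys used elsewhere in this codebase):

    import time
    from src.plugins.PFC.pfc import DecisionInfo  # moved to decision_info.py later in this series

    info = DecisionInfo()
    info.bot_id = "10001"
    info.update_from_message({
        "time": time.time(),
        "processed_plain_text": "hi there",
        "user_info": {"user_id": "20002", "user_nickname": "alice"},
    })
    print(info.new_messages_count, info.active_users)   # 1 {'20002'}
    print(info.get_user_response_time() is not None)    # True
    info.clear_unprocessed_messages()                    # reset once the planner has consumed them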
diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py
index 46f549374..c16d1360b 100644
--- a/src/plugins/config/config.py
+++ b/src/plugins/config/config.py
@@ -25,9 +25,9 @@ config_config = LogConfig(
logger = get_module_logger("config", config=config_config)
#考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
-is_test = False
-mai_version_main = "0.6.1"
-mai_version_fix = ""
+is_test = True
+mai_version_main = "0.6.2"
+mai_version_fix = "snapshot-1"
if mai_version_fix:
if is_test:
mai_version = f"test-{mai_version_main}-{mai_version_fix}"
@@ -441,6 +441,7 @@ class BotConfig:
config.emoji_response_penalty = willing_config.get(
"emoji_response_penalty", config.emoji_response_penalty
)
+ if config.INNER_VERSION in SpecifierSet(">=1.2.5"):
config.mentioned_bot_inevitable_reply = willing_config.get(
"mentioned_bot_inevitable_reply", config.mentioned_bot_inevitable_reply
)
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index d7a5cdaea..70cf0e0b7 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
[inner]
-version = "1.2.4"
+version = "1.2.5"
#以下是给开发人员阅读的,一般用户不需要阅读
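The config gate added above relies on packaging's SpecifierSet membership test, which is why the template version is bumped to 1.2.5 in the same patch. A quick illustration:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    print(Version("1.2.5") in SpecifierSet(">=1.2.5"))  # True  -> the new willing options are read
    print(Version("1.2.4") in SpecifierSet(">=1.2.5"))  # False -> older configs keep the defaults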
From 3e3ee2621e6ebfb09439e0287aa2bf91c6064c3b Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 8 Apr 2025 00:31:44 +0800
Subject: [PATCH 02/13] Update pfc.py
---
src/plugins/PFC/pfc.py | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index 46a7fde4e..7d0339356 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -805,20 +805,33 @@ class Conversation:
"""初始化实例(在后台运行)"""
try:
logger.info(f"开始初始化对话实例: {self.stream_id}")
+
+ start_time = time.time()
+ logger.info("启动观察器...")
self.chat_observer.start() # 启动观察器
+ logger.info(f"观察器启动完成,耗时: {time.time() - start_time:.2f}秒")
+
await asyncio.sleep(1) # 给观察器一些启动时间
# 获取初始目标
+ logger.info("开始分析初始对话目标...")
+ goal_start_time = time.time()
self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
+ logger.info(f"目标分析完成,耗时: {time.time() - goal_start_time:.2f}秒")
# 标记初始化完成
+ logger.info("标记初始化完成...")
self.__class__._initializing[self.stream_id] = False
if self.stream_id in self.__class__._init_events:
self.__class__._init_events[self.stream_id].set()
# 启动对话循环
+ logger.info("启动对话循环...")
asyncio.create_task(self._conversation_loop())
+ total_time = time.time() - start_time
+ logger.info(f"实例初始化完成,总耗时: {total_time:.2f}秒")
+
except Exception as e:
logger.error(f"初始化对话实例失败: {e}")
# 清理失败的初始化
From e20030ba1a601db552e4623f097b79c42ed9541f Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Tue, 8 Apr 2025 17:22:32 +0800
Subject: [PATCH 03/13] =?UTF-8?q?=E5=B0=9D=E8=AF=95prettify=20statics?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/utils/statistic.py | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index eef10c01d..4b9afff39 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -2,7 +2,7 @@ import threading
import time
from collections import defaultdict
from datetime import datetime, timedelta
-from typing import Any, Dict
+from typing import Any, Dict, List
from src.common.logger import get_module_logger
from ...common.database import db
@@ -22,6 +22,7 @@ class LLMStatistics:
self.stats_thread = None
self.console_thread = None
self._init_database()
+        self.name_dict: Dict[str, List] = {}
def _init_database(self):
"""初始化数据库集合"""
@@ -137,16 +138,24 @@ class LLMStatistics:
# user_id = str(doc.get("user_info", {}).get("user_id", "unknown"))
chat_info = doc.get("chat_info", {})
user_info = doc.get("user_info", {})
+ message_time = doc.get("time", 0)
group_info = chat_info.get("group_info") if chat_info else {}
# print(f"group_info: {group_info}")
group_name = None
if group_info:
+ group_id = f"g{group_info.get('group_id')}"
group_name = group_info.get("group_name", f"群{group_info.get('group_id')}")
if user_info and not group_name:
+ group_id = f"u{user_info['user_id']}"
group_name = user_info["user_nickname"]
+ if self.name_dict.get(group_id):
+ if message_time > self.name_dict.get(group_id)[1]:
+ self.name_dict[group_id] = [group_name, message_time]
+ else:
+ self.name_dict[group_id] = [group_name, message_time]
# print(f"group_name: {group_name}")
stats["messages_by_user"][user_id] += 1
- stats["messages_by_chat"][group_name] += 1
+ stats["messages_by_chat"][group_id] += 1
return stats
@@ -187,7 +196,7 @@ class LLMStatistics:
tokens = stats["tokens_by_model"][model_name]
cost = stats["costs_by_model"][model_name]
output.append(
- data_fmt.format(model_name[:32] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
+ data_fmt.format(model_name[:30] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
)
output.append("")
@@ -221,8 +230,8 @@ class LLMStatistics:
# 添加聊天统计
output.append("群组统计:")
output.append(("群组名称 消息数量"))
- for group_name, count in sorted(stats["messages_by_chat"].items()):
- output.append(f"{group_name[:32]:<32} {count:>10}")
+ for group_id, count in sorted(stats["messages_by_chat"].items()):
+ output.append(f"{self.name_dict[group_id][0][:32]:<32} {count:>10}")
return "\n".join(output)
@@ -250,7 +259,7 @@ class LLMStatistics:
tokens = stats["tokens_by_model"][model_name]
cost = stats["costs_by_model"][model_name]
output.append(
- data_fmt.format(model_name[:32] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
+ data_fmt.format(model_name[:30] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
)
output.append("")
@@ -284,8 +293,8 @@ class LLMStatistics:
# 添加聊天统计
output.append("群组统计:")
output.append(("群组名称 消息数量"))
- for group_name, count in sorted(stats["messages_by_chat"].items()):
- output.append(f"{group_name[:32]:<32} {count:>10}")
+ for group_id, count in sorted(stats["messages_by_chat"].items()):
+ output.append(f"{self.name_dict[group_id][0][:32]:<32} {count:>10}")
return "\n".join(output)
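In effect the per-chat counters are now keyed by a stable id ("g<group_id>" or "u<user_id>") while name_dict remembers the most recently seen display name for that id, so a renamed group no longer splits its counts. A condensed, standalone sketch of that bookkeeping with made-up documents:

    from collections import defaultdict

    name_dict = {}                        # id -> [latest display name, time it was seen]
    messages_by_chat = defaultdict(int)

    def record(doc):
        group_info = (doc.get("chat_info") or {}).get("group_info")
        user_info = doc.get("user_info", {})
        t = doc.get("time", 0)
        if group_info:
            key = f"g{group_info.get('group_id')}"
            name = group_info.get("group_name", f"群{group_info.get('group_id')}")
        else:
            key = f"u{user_info['user_id']}"
            name = user_info["user_nickname"]
        if key not in name_dict or t > name_dict[key][1]:
            name_dict[key] = [name, t]
        messages_by_chat[key] += 1

    record({"chat_info": {"group_info": {"group_id": 1, "group_name": "Old Name"}}, "time": 1})
    record({"chat_info": {"group_info": {"group_id": 1, "group_name": "New Name"}}, "time": 2})
    print(name_dict["g1"][0], messages_by_chat["g1"])  # New Name 2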
From e3b2d5b88cbc68ea7429a65513734772998ba1c3 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 8 Apr 2025 17:38:42 +0800
Subject: [PATCH 04/13] =?UTF-8?q?fix=EF=BC=9A=E6=A8=A1=E5=9D=97=E5=8C=96PF?=
=?UTF-8?q?C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
bot.py | 4 +
src/common/crash_logger.py | 72 ++++
src/plugins/PFC/action_planner.py | 157 +++++++
src/plugins/PFC/chat_observer.py | 1 -
src/plugins/PFC/conversation.py | 264 ++++++++++++
src/plugins/PFC/decision_info.py | 116 +++++
src/plugins/PFC/pfc.py | 654 +----------------------------
src/plugins/PFC/pfc_manager.py | 97 +++++
src/plugins/PFC/reply_generator.py | 153 +++++++
src/plugins/chat/bot.py | 35 +-
10 files changed, 878 insertions(+), 675 deletions(-)
create mode 100644 src/common/crash_logger.py
create mode 100644 src/plugins/PFC/action_planner.py
create mode 100644 src/plugins/PFC/conversation.py
create mode 100644 src/plugins/PFC/decision_info.py
create mode 100644 src/plugins/PFC/pfc_manager.py
create mode 100644 src/plugins/PFC/reply_generator.py
diff --git a/bot.py b/bot.py
index a0bf3a3cb..ca214967e 100644
--- a/bot.py
+++ b/bot.py
@@ -8,6 +8,7 @@ import time
import platform
from dotenv import load_dotenv
from src.common.logger import get_module_logger
+from src.common.crash_logger import install_crash_handler
from src.main import MainSystem
logger = get_module_logger("main_bot")
@@ -193,6 +194,9 @@ def raw_main():
if platform.system().lower() != "windows":
time.tzset()
+ # 安装崩溃日志处理器
+ install_crash_handler()
+
check_eula()
print("检查EULA和隐私条款完成")
easter_egg()
diff --git a/src/common/crash_logger.py b/src/common/crash_logger.py
new file mode 100644
index 000000000..658e1bb02
--- /dev/null
+++ b/src/common/crash_logger.py
@@ -0,0 +1,72 @@
+import sys
+import traceback
+import logging
+from pathlib import Path
+from logging.handlers import RotatingFileHandler
+
+def setup_crash_logger():
+ """设置崩溃日志记录器"""
+ # 创建logs/crash目录(如果不存在)
+ crash_log_dir = Path("logs/crash")
+ crash_log_dir.mkdir(parents=True, exist_ok=True)
+
+ # 创建日志记录器
+ crash_logger = logging.getLogger('crash_logger')
+ crash_logger.setLevel(logging.ERROR)
+
+ # 设置日志格式
+ formatter = logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s\n'
+ '异常类型: %(exc_info)s\n'
+ '详细信息:\n%(message)s\n'
+ '-------------------\n'
+ )
+
+ # 创建按大小轮转的文件处理器(最大10MB,保留5个备份)
+ log_file = crash_log_dir / "crash.log"
+ file_handler = RotatingFileHandler(
+ log_file,
+ maxBytes=10*1024*1024, # 10MB
+ backupCount=5,
+ encoding='utf-8'
+ )
+ file_handler.setFormatter(formatter)
+ crash_logger.addHandler(file_handler)
+
+ return crash_logger
+
+def log_crash(exc_type, exc_value, exc_traceback):
+ """记录崩溃信息到日志文件"""
+ if exc_type is None:
+ return
+
+ # 获取崩溃日志记录器
+ crash_logger = logging.getLogger('crash_logger')
+
+ # 获取完整的异常堆栈信息
+ stack_trace = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
+
+ # 记录崩溃信息
+ crash_logger.error(
+ stack_trace,
+ exc_info=(exc_type, exc_value, exc_traceback)
+ )
+
+def install_crash_handler():
+ """安装全局异常处理器"""
+ # 设置崩溃日志记录器
+ setup_crash_logger()
+
+ # 保存原始的异常处理器
+ original_hook = sys.excepthook
+
+ def exception_handler(exc_type, exc_value, exc_traceback):
+ """全局异常处理器"""
+ # 记录崩溃信息
+ log_crash(exc_type, exc_value, exc_traceback)
+
+ # 调用原始的异常处理器
+ original_hook(exc_type, exc_value, exc_traceback)
+
+ # 设置全局异常处理器
+ sys.excepthook = exception_handler
\ No newline at end of file
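A tiny way to exercise the new handler (the RuntimeError is of course made up). Note that sys.excepthook only fires for uncaught exceptions in the main thread, which is exactly what install_crash_handler hooks:

    from src.common.crash_logger import install_crash_handler

    install_crash_handler()

    def boom():
        raise RuntimeError("simulated crash")

    # Left uncaught on purpose: the traceback is appended to logs/crash/crash.log
    # and then forwarded to the original excepthook, so it still prints to stderr.
    boom()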
diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py
new file mode 100644
index 000000000..c24cc0903
--- /dev/null
+++ b/src/plugins/PFC/action_planner.py
@@ -0,0 +1,157 @@
+import datetime
+import asyncio
+from typing import List, Optional, Dict, Any, Tuple, Literal, Set
+from enum import Enum
+from src.common.logger import get_module_logger
+from ..chat.chat_stream import ChatStream
+from ..message.message_base import UserInfo, Seg
+from ..chat.message import Message
+from ..models.utils_model import LLM_request
+from ..config.config import global_config
+from src.plugins.chat.message import MessageSending
+from ..message.api import global_api
+from ..storage.storage import MessageStorage
+from .chat_observer import ChatObserver
+from .reply_checker import ReplyChecker
+from .pfc_utils import get_items_from_json
+from src.individuality.individuality import Individuality
+from .chat_states import NotificationHandler, Notification, NotificationType
+import time
+from dataclasses import dataclass, field
+from .pfc import DecisionInfo, DecisionInfoType
+
+logger = get_module_logger("action_planner")
+
+class ActionPlanner:
+ """行动规划器"""
+
+ def __init__(self, stream_id: str):
+ self.llm = LLM_request(
+ model=global_config.llm_normal,
+ temperature=0.7,
+ max_tokens=1000,
+ request_type="action_planning"
+ )
+ self.personality_info = Individuality.get_instance().get_prompt(type = "personality", x_person = 2, level = 2)
+ self.name = global_config.BOT_NICKNAME
+ self.chat_observer = ChatObserver.get_instance(stream_id)
+
+ async def plan(
+ self,
+ goal: str,
+ method: str,
+ reasoning: str,
+ action_history: List[Dict[str, str]] = None,
+ decision_info: DecisionInfoType = None # Use DecisionInfoType here
+ ) -> Tuple[str, str]:
+ """规划下一步行动
+
+ Args:
+ goal: 对话目标
+ method: 实现方法
+ reasoning: 目标原因
+ action_history: 行动历史记录
+ decision_info: 决策信息
+
+ Returns:
+ Tuple[str, str]: (行动类型, 行动原因)
+ """
+ # 构建提示词
+ logger.debug(f"开始规划行动:当前目标: {goal}")
+
+ # 获取最近20条消息
+ messages = self.chat_observer.get_message_history(limit=20)
+ chat_history_text = ""
+ for msg in messages:
+ time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
+ user_info = UserInfo.from_dict(msg.get("user_info", {}))
+ sender = user_info.user_nickname or f"用户{user_info.user_id}"
+ if sender == self.name:
+ sender = "你说"
+ chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
+
+ personality_text = f"你的名字是{self.name},{self.personality_info}"
+
+ # 构建action历史文本
+ action_history_text = ""
+ if action_history and action_history[-1]['action'] == "direct_reply":
+ action_history_text = "你刚刚发言回复了对方"
+
+ # 构建决策信息文本
+ decision_info_text = ""
+ if decision_info:
+ decision_info_text = "当前对话状态:\n"
+ if decision_info.is_cold_chat:
+ decision_info_text += f"对话处于冷场状态,已持续{int(decision_info.cold_chat_duration)}秒\n"
+
+ if decision_info.new_messages_count > 0:
+ decision_info_text += f"有{decision_info.new_messages_count}条新消息未处理\n"
+
+ user_response_time = decision_info.get_user_response_time()
+ if user_response_time:
+ decision_info_text += f"距离用户上次发言已过去{int(user_response_time)}秒\n"
+
+ bot_response_time = decision_info.get_bot_response_time()
+ if bot_response_time:
+ decision_info_text += f"距离你上次发言已过去{int(bot_response_time)}秒\n"
+
+ if decision_info.active_users:
+ decision_info_text += f"当前活跃用户数: {len(decision_info.active_users)}\n"
+
+ prompt = f"""{personality_text}。现在你在参与一场QQ聊天,请分析以下内容,根据信息决定下一步行动:
+
+当前对话目标:{goal}
+实现该对话目标的方式:{method}
+产生该对话目标的原因:{reasoning}
+
+{decision_info_text}
+{action_history_text}
+
+最近的对话记录:
+{chat_history_text}
+
+请你接下去想想要你要做什么,可以发言,可以等待,可以倾听,可以调取知识。注意不同行动类型的要求,不要重复发言:
+行动类型:
+fetch_knowledge: 需要调取知识,当需要专业知识或特定信息时选择
+wait: 当你做出了发言,对方尚未回复时等待对方的回复
+listening: 倾听对方发言,当你认为对方发言尚未结束时采用
+direct_reply: 不符合上述情况,回复对方,注意不要过多或者重复发言
+rethink_goal: 重新思考对话目标,当发现对话目标不合适时选择,会重新思考对话目标
+judge_conversation: 判断对话是否结束,当发现对话目标已经达到或者希望停止对话时选择,会判断对话是否结束
+
+请以JSON格式输出,包含以下字段:
+1. action: 行动类型,注意你之前的行为
+2. reason: 选择该行动的原因,注意你之前的行为(简要解释)
+
+注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
+
+ logger.debug(f"发送到LLM的提示词: {prompt}")
+ try:
+ content, _ = await self.llm.generate_response_async(prompt)
+ logger.debug(f"LLM原始返回内容: {content}")
+
+ # 使用简化函数提取JSON内容
+ success, result = get_items_from_json(
+ content,
+ "action", "reason",
+ default_values={"action": "direct_reply", "reason": "默认原因"}
+ )
+
+ if not success:
+ return "direct_reply", "JSON解析失败,选择直接回复"
+
+ action = result["action"]
+ reason = result["reason"]
+
+ # 验证action类型
+ if action not in ["direct_reply", "fetch_knowledge", "wait", "listening", "rethink_goal", "judge_conversation"]:
+ logger.warning(f"未知的行动类型: {action},默认使用listening")
+ action = "listening"
+
+ logger.info(f"规划的行动: {action}")
+ logger.info(f"行动原因: {reason}")
+ return action, reason
+
+ except Exception as e:
+ logger.error(f"规划行动时出错: {str(e)}")
+ return "direct_reply", "发生错误,选择直接回复"
\ No newline at end of file
diff --git a/src/plugins/PFC/chat_observer.py b/src/plugins/PFC/chat_observer.py
index 62ce6d7f9..2fda95d2c 100644
--- a/src/plugins/PFC/chat_observer.py
+++ b/src/plugins/PFC/chat_observer.py
@@ -2,7 +2,6 @@ import time
import asyncio
from typing import Optional, Dict, Any, List, Tuple
from src.common.logger import get_module_logger
-from src.common.database import db
from ..message.message_base import UserInfo
from ..config.config import global_config
from .chat_states import NotificationManager, create_new_message_notification, create_cold_chat_notification
diff --git a/src/plugins/PFC/conversation.py b/src/plugins/PFC/conversation.py
new file mode 100644
index 000000000..5321b9c45
--- /dev/null
+++ b/src/plugins/PFC/conversation.py
@@ -0,0 +1,264 @@
+import asyncio
+import datetime
+from typing import Dict, Any
+from ..chat.message import Message
+from .pfc import ConversationState, ChatObserver, GoalAnalyzer, Waiter, DirectMessageSender, PFCNotificationHandler
+from src.common.logger import get_module_logger
+from .action_planner import ActionPlanner
+from .decision_info import DecisionInfo
+from .reply_generator import ReplyGenerator
+from ..chat.chat_stream import ChatStream
+from ..message.message_base import UserInfo
+from ..config.config import global_config
+from src.plugins.chat.chat_stream import chat_manager
+from .pfc_KnowledgeFetcher import KnowledgeFetcher
+import time
+import traceback
+
+logger = get_module_logger("pfc_conversation")
+
+
+class Conversation:
+ """对话类,负责管理单个对话的状态和行为"""
+
+ def __init__(self, stream_id: str):
+ """初始化对话实例
+
+ Args:
+ stream_id: 聊天流ID
+ """
+ self.stream_id = stream_id
+ self.state = ConversationState.INIT
+ self.should_continue = False
+
+ # 目标和规划
+ self.current_goal = "保持友好的对话"
+ self.current_method = "以友好的态度回应"
+ self.goal_reasoning = "确保对话顺利进行"
+
+ # 知识缓存和行动历史
+ self.knowledge_cache = {}
+ self.action_history = []
+
+ # 回复相关
+ self.generated_reply = ""
+
+ async def _initialize(self):
+ """初始化实例,注册所有组件"""
+ try:
+
+ self.chat_observer = ChatObserver.get_instance(self.stream_id)
+ self.action_planner = ActionPlanner(self.stream_id)
+ self.goal_analyzer = GoalAnalyzer(self.stream_id)
+ self.reply_generator = ReplyGenerator(self.stream_id)
+ self.knowledge_fetcher = KnowledgeFetcher()
+ self.waiter = Waiter(self.stream_id)
+ self.direct_sender = DirectMessageSender()
+
+ # 获取聊天流信息
+ self.chat_stream = chat_manager.get_stream(self.stream_id)
+
+ # 决策信息
+ self.decision_info = DecisionInfo()
+ self.decision_info.bot_id = global_config.BOT_QQ
+
+ # 创建通知处理器
+ self.notification_handler = PFCNotificationHandler(self)
+
+ except Exception as e:
+ logger.error(f"初始化对话实例:注册组件失败: {e}")
+ logger.error(traceback.format_exc())
+ raise
+
+ try:
+ start_time = time.time()
+ self.chat_observer.start() # 启动观察器
+ logger.info(f"观察器启动完成,耗时: {time.time() - start_time:.2f}秒")
+
+ await asyncio.sleep(1) # 给观察器一些启动时间
+
+ total_time = time.time() - start_time
+ logger.info(f"实例初始化完成,总耗时: {total_time:.2f}秒")
+
+ self.should_continue = True
+ asyncio.create_task(self.start())
+
+ except Exception as e:
+ logger.error(f"初始化对话实例失败: {e}")
+ logger.error(traceback.format_exc())
+ raise
+
+ async def start(self):
+ """开始对话流程"""
+ try:
+ logger.info("对话系统启动")
+ while self.should_continue:
+ await self._do_a_step()
+ except Exception as e:
+ logger.error(f"启动对话系统失败: {e}")
+ raise
+
+ async def _do_a_step(self):
+ """思考步"""
+ # 获取最近的消息历史
+ self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
+
+ self.chat_observer.trigger_update() # 触发立即更新
+ if not await self.chat_observer.wait_for_update():
+ logger.warning("等待消息更新超时")
+
+ # 使用决策信息来辅助行动规划
+ action, reason = await self.action_planner.plan(
+ self.current_goal,
+ self.current_method,
+ self.goal_reasoning,
+ self.action_history,
+ self.decision_info # 传入决策信息
+ )
+
+ # 执行行动
+ await self._handle_action(action, reason)
+
+ # # 清理已处理的消息
+ # self.decision_info.clear_unprocessed_messages()
+
+ def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
+ """将消息字典转换为Message对象"""
+ try:
+ chat_info = msg_dict.get("chat_info", {})
+ chat_stream = ChatStream.from_dict(chat_info)
+ user_info = UserInfo.from_dict(msg_dict.get("user_info", {}))
+
+ return Message(
+ message_id=msg_dict["message_id"],
+ chat_stream=chat_stream,
+ time=msg_dict["time"],
+ user_info=user_info,
+ processed_plain_text=msg_dict.get("processed_plain_text", ""),
+ detailed_plain_text=msg_dict.get("detailed_plain_text", "")
+ )
+ except Exception as e:
+ logger.warning(f"转换消息时出错: {e}")
+ raise
+
+ async def _handle_action(self, action: str, reason: str):
+ """处理规划的行动"""
+ logger.info(f"执行行动: {action}, 原因: {reason}")
+
+ # 记录action历史
+ self.action_history.append({
+ "action": action,
+ "reason": reason,
+ "time": datetime.datetime.now().strftime("%H:%M:%S")
+ })
+
+ # 只保留最近的10条记录
+ if len(self.action_history) > 10:
+ self.action_history = self.action_history[-10:]
+
+ if action == "direct_reply":
+ self.state = ConversationState.GENERATING
+ messages = self.chat_observer.get_message_history(limit=30)
+            # 注意:ReplyGenerator.generate 的签名是 (goal, chat_history, knowledge_cache, ...)
+            self.generated_reply = await self.reply_generator.generate(
+                self.current_goal,
+                [self._convert_to_message(msg) for msg in messages],
+                self.knowledge_cache
+            )
+
+            # 检查回复是否合适,不合适时记录原因(保持原有流程,仍然发送)
+            is_suitable, reason, need_replan = await self.reply_generator.check_reply(
+                self.generated_reply,
+                self.current_goal
+            )
+            if not is_suitable:
+                logger.warning(f"生成的回复未通过检查: {reason}")
+
+            await self._send_reply()
+
+ elif action == "fetch_knowledge":
+ self.state = ConversationState.GENERATING
+ messages = self.chat_observer.get_message_history(limit=30)
+ knowledge, sources = await self.knowledge_fetcher.fetch(
+ self.current_goal,
+ [self._convert_to_message(msg) for msg in messages]
+ )
+ logger.info(f"获取到知识,来源: {sources}")
+
+ if knowledge != "未找到相关知识":
+ self.knowledge_cache[sources] = knowledge
+
+ elif action == "rethink_goal":
+ self.state = ConversationState.RETHINKING
+ self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
+
+ elif action == "judge_conversation":
+ self.state = ConversationState.JUDGING
+ self.goal_achieved, self.stop_conversation, self.reason = await self.goal_analyzer.analyze_conversation(self.current_goal, self.goal_reasoning)
+
+ # 如果当前目标达成但还有其他目标
+ if self.goal_achieved and not self.stop_conversation:
+ alternative_goals = await self.goal_analyzer.get_alternative_goals()
+ if alternative_goals:
+ # 切换到下一个目标
+ self.current_goal, self.current_method, self.goal_reasoning = alternative_goals[0]
+ logger.info(f"当前目标已达成,切换到新目标: {self.current_goal}")
+ return
+
+ if self.stop_conversation:
+ await self._stop_conversation()
+
+ elif action == "listening":
+ self.state = ConversationState.LISTENING
+ logger.info("倾听对方发言...")
+ if await self.waiter.wait(): # 如果返回True表示超时
+ await self._send_timeout_message()
+ await self._stop_conversation()
+
+ else: # wait
+ self.state = ConversationState.WAITING
+ logger.info("等待更多信息...")
+ if await self.waiter.wait(): # 如果返回True表示超时
+ await self._send_timeout_message()
+ await self._stop_conversation()
+
+ async def _send_timeout_message(self):
+ """发送超时结束消息"""
+ try:
+ messages = self.chat_observer.get_message_history(limit=1)
+ if not messages:
+ return
+
+ latest_message = self._convert_to_message(messages[0])
+ await self.direct_sender.send_message(
+ chat_stream=self.chat_stream,
+ content="抱歉,由于等待时间过长,我需要先去忙别的了。下次再聊吧~",
+ reply_to_message=latest_message
+ )
+ except Exception as e:
+ logger.error(f"发送超时消息失败: {str(e)}")
+
+ async def _send_reply(self):
+ """发送回复"""
+ if not self.generated_reply:
+ logger.warning("没有生成回复")
+ return
+
+ messages = self.chat_observer.get_message_history(limit=1)
+ if not messages:
+ logger.warning("没有最近的消息可以回复")
+ return
+
+ latest_message = self._convert_to_message(messages[0])
+ try:
+ await self.direct_sender.send_message(
+ chat_stream=self.chat_stream,
+ content=self.generated_reply,
+ reply_to_message=latest_message
+ )
+ self.chat_observer.trigger_update() # 触发立即更新
+ if not await self.chat_observer.wait_for_update():
+ logger.warning("等待消息更新超时")
+
+ self.state = ConversationState.ANALYZING
+ except Exception as e:
+ logger.error(f"发送消息失败: {str(e)}")
+ self.state = ConversationState.ANALYZING
\ No newline at end of file
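For reference, `_handle_action` above maps each planned action onto a conversation state before doing its work. A minimal sketch of that mapping (names taken from the code above; the `ConversationState` members are assumed to exist as referenced):

```python
# Sketch only: action -> ConversationState mapping used by _handle_action above.
# The state names are the ones referenced in the patch; this table is illustrative.
ACTION_TO_STATE = {
    "direct_reply": "GENERATING",       # generate, check and send a reply
    "fetch_knowledge": "GENERATING",    # fetch knowledge into knowledge_cache
    "rethink_goal": "RETHINKING",       # re-run GoalAnalyzer.analyze_goal()
    "judge_conversation": "JUDGING",    # decide whether to switch goals or stop
    "listening": "LISTENING",           # wait for the other side to finish speaking
    "wait": "WAITING",                  # wait for more information (default branch)
}
```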
diff --git a/src/plugins/PFC/decision_info.py b/src/plugins/PFC/decision_info.py
new file mode 100644
index 000000000..29beb3484
--- /dev/null
+++ b/src/plugins/PFC/decision_info.py
@@ -0,0 +1,99 @@
+#Programmable Friendly Conversationalist
+#Prefrontal cortex
+import time
+from typing import List, Optional, Dict, Any, Set
+from dataclasses import dataclass, field
+from ..message.message_base import UserInfo
+
+
+@dataclass
+class DecisionInfo:
+ """决策信息类,用于收集和管理来自chat_observer的通知信息"""
+
+ # 消息相关
+ last_message_time: Optional[float] = None
+ last_message_content: Optional[str] = None
+ last_message_sender: Optional[str] = None
+ new_messages_count: int = 0
+ unprocessed_messages: List[Dict[str, Any]] = field(default_factory=list)
+
+ # 对话状态
+ is_cold_chat: bool = False
+ cold_chat_duration: float = 0.0
+ last_bot_speak_time: Optional[float] = None
+ last_user_speak_time: Optional[float] = None
+
+ # 对话参与者
+ active_users: Set[str] = field(default_factory=set)
+ bot_id: str = field(default="")
+
+ def update_from_message(self, message: Dict[str, Any]):
+ """从消息更新信息
+
+ Args:
+ message: 消息数据
+ """
+ self.last_message_time = message["time"]
+ self.last_message_content = message.get("processed_plain_text", "")
+
+ user_info = UserInfo.from_dict(message.get("user_info", {}))
+ self.last_message_sender = user_info.user_id
+
+ if user_info.user_id == self.bot_id:
+ self.last_bot_speak_time = message["time"]
+ else:
+ self.last_user_speak_time = message["time"]
+ self.active_users.add(user_info.user_id)
+
+ self.new_messages_count += 1
+ self.unprocessed_messages.append(message)
+
+ def update_cold_chat_status(self, is_cold: bool, current_time: float):
+ """更新冷场状态
+
+ Args:
+ is_cold: 是否冷场
+ current_time: 当前时间
+ """
+ self.is_cold_chat = is_cold
+ if is_cold and self.last_message_time:
+ self.cold_chat_duration = current_time - self.last_message_time
+
+ def get_active_duration(self) -> float:
+ """获取当前活跃时长
+
+ Returns:
+ float: 最后一条消息到现在的时长(秒)
+ """
+ if not self.last_message_time:
+ return 0.0
+ return time.time() - self.last_message_time
+
+ def get_user_response_time(self) -> Optional[float]:
+ """获取用户响应时间
+
+ Returns:
+ Optional[float]: 用户最后发言到现在的时长(秒),如果没有用户发言则返回None
+ """
+ if not self.last_user_speak_time:
+ return None
+ return time.time() - self.last_user_speak_time
+
+ def get_bot_response_time(self) -> Optional[float]:
+ """获取机器人响应时间
+
+ Returns:
+ Optional[float]: 机器人最后发言到现在的时长(秒),如果没有机器人发言则返回None
+ """
+ if not self.last_bot_speak_time:
+ return None
+ return time.time() - self.last_bot_speak_time
+
+ def clear_unprocessed_messages(self):
+ """清空未处理消息列表"""
+ self.unprocessed_messages.clear()
+ self.new_messages_count = 0
+
+
+# Forward reference for type hints
+DecisionInfoType = DecisionInfo
\ No newline at end of file
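A minimal usage sketch of `DecisionInfo` (illustrative, not part of the patch; the message dict shape and the import path are assumptions based on the code above):

```python
import time
from src.plugins.PFC.decision_info import DecisionInfo  # assumed module path

info = DecisionInfo()
info.bot_id = "10001"  # normally set from global_config.BOT_QQ

# Feed a message dict shaped like the ones ChatObserver stores
info.update_from_message({
    "time": time.time(),
    "processed_plain_text": "hello",
    "user_info": {"user_id": "20002", "user_nickname": "alice"},
})

assert info.new_messages_count == 1
print(info.get_user_response_time())  # seconds since the user last spoke
info.clear_unprocessed_messages()     # reset once the planner has consumed them
```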
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index 7d0339356..072066660 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -11,17 +11,16 @@ from ..chat.message import Message
from ..models.utils_model import LLM_request
from ..config.config import global_config
from src.plugins.chat.message import MessageSending
-from src.plugins.chat.chat_stream import chat_manager
from ..message.api import global_api
from ..storage.storage import MessageStorage
from .chat_observer import ChatObserver
-from .pfc_KnowledgeFetcher import KnowledgeFetcher
-from .reply_checker import ReplyChecker
+from .reply_generator import ReplyGenerator
from .pfc_utils import get_items_from_json
from src.individuality.individuality import Individuality
from .chat_states import NotificationHandler, Notification, NotificationType
import time
from dataclasses import dataclass, field
+from .conversation import Conversation
logger = get_module_logger("pfc")
@@ -43,235 +42,6 @@ class ConversationState(Enum):
ActionType = Literal["direct_reply", "fetch_knowledge", "wait"]
-
-@dataclass
-class DecisionInfo:
- """决策信息类,用于收集和管理来自chat_observer的通知信息"""
-
- # 消息相关
- last_message_time: Optional[float] = None
- last_message_content: Optional[str] = None
- last_message_sender: Optional[str] = None
- new_messages_count: int = 0
- unprocessed_messages: List[Dict[str, Any]] = field(default_factory=list)
-
- # 对话状态
- is_cold_chat: bool = False
- cold_chat_duration: float = 0.0
- last_bot_speak_time: Optional[float] = None
- last_user_speak_time: Optional[float] = None
-
- # 对话参与者
- active_users: Set[str] = field(default_factory=set)
- bot_id: str = field(default="")
-
- def update_from_message(self, message: Dict[str, Any]):
- """从消息更新信息
-
- Args:
- message: 消息数据
- """
- self.last_message_time = message["time"]
- self.last_message_content = message.get("processed_plain_text", "")
-
- user_info = UserInfo.from_dict(message.get("user_info", {}))
- self.last_message_sender = user_info.user_id
-
- if user_info.user_id == self.bot_id:
- self.last_bot_speak_time = message["time"]
- else:
- self.last_user_speak_time = message["time"]
- self.active_users.add(user_info.user_id)
-
- self.new_messages_count += 1
- self.unprocessed_messages.append(message)
-
- def update_cold_chat_status(self, is_cold: bool, current_time: float):
- """更新冷场状态
-
- Args:
- is_cold: 是否冷场
- current_time: 当前时间
- """
- self.is_cold_chat = is_cold
- if is_cold and self.last_message_time:
- self.cold_chat_duration = current_time - self.last_message_time
-
- def get_active_duration(self) -> float:
- """获取当前活跃时长
-
- Returns:
- float: 最后一条消息到现在的时长(秒)
- """
- if not self.last_message_time:
- return 0.0
- return time.time() - self.last_message_time
-
- def get_user_response_time(self) -> Optional[float]:
- """获取用户响应时间
-
- Returns:
- Optional[float]: 用户最后发言到现在的时长(秒),如果没有用户发言则返回None
- """
- if not self.last_user_speak_time:
- return None
- return time.time() - self.last_user_speak_time
-
- def get_bot_response_time(self) -> Optional[float]:
- """获取机器人响应时间
-
- Returns:
- Optional[float]: 机器人最后发言到现在的时长(秒),如果没有机器人发言则返回None
- """
- if not self.last_bot_speak_time:
- return None
- return time.time() - self.last_bot_speak_time
-
- def clear_unprocessed_messages(self):
- """清空未处理消息列表"""
- self.unprocessed_messages.clear()
- self.new_messages_count = 0
-
-
-# Forward reference for type hints
-DecisionInfoType = DecisionInfo
-
-
-class ActionPlanner:
- """行动规划器"""
-
- def __init__(self, stream_id: str):
- self.llm = LLM_request(
- model=global_config.llm_normal,
- temperature=0.7,
- max_tokens=1000,
- request_type="action_planning"
- )
- self.personality_info = Individuality.get_instance().get_prompt(type = "personality", x_person = 2, level = 2)
- self.name = global_config.BOT_NICKNAME
- self.chat_observer = ChatObserver.get_instance(stream_id)
-
- async def plan(
- self,
- goal: str,
- method: str,
- reasoning: str,
- action_history: List[Dict[str, str]] = None,
- decision_info: DecisionInfoType = None # Use DecisionInfoType here
- ) -> Tuple[str, str]:
- """规划下一步行动
-
- Args:
- goal: 对话目标
- method: 实现方法
- reasoning: 目标原因
- action_history: 行动历史记录
- decision_info: 决策信息
-
- Returns:
- Tuple[str, str]: (行动类型, 行动原因)
- """
- # 构建提示词
- logger.debug(f"开始规划行动:当前目标: {goal}")
-
- # 获取最近20条消息
- messages = self.chat_observer.get_message_history(limit=20)
- chat_history_text = ""
- for msg in messages:
- time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
- user_info = UserInfo.from_dict(msg.get("user_info", {}))
- sender = user_info.user_nickname or f"用户{user_info.user_id}"
- if sender == self.name:
- sender = "你说"
- chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
-
- personality_text = f"你的名字是{self.name},{self.personality_info}"
-
- # 构建action历史文本
- action_history_text = ""
- if action_history and action_history[-1]['action'] == "direct_reply":
- action_history_text = "你刚刚发言回复了对方"
-
- # 构建决策信息文本
- decision_info_text = ""
- if decision_info:
- decision_info_text = "当前对话状态:\n"
- if decision_info.is_cold_chat:
- decision_info_text += f"对话处于冷场状态,已持续{int(decision_info.cold_chat_duration)}秒\n"
-
- if decision_info.new_messages_count > 0:
- decision_info_text += f"有{decision_info.new_messages_count}条新消息未处理\n"
-
- user_response_time = decision_info.get_user_response_time()
- if user_response_time:
- decision_info_text += f"距离用户上次发言已过去{int(user_response_time)}秒\n"
-
- bot_response_time = decision_info.get_bot_response_time()
- if bot_response_time:
- decision_info_text += f"距离你上次发言已过去{int(bot_response_time)}秒\n"
-
- if decision_info.active_users:
- decision_info_text += f"当前活跃用户数: {len(decision_info.active_users)}\n"
-
- prompt = f"""{personality_text}。现在你在参与一场QQ聊天,请分析以下内容,根据信息决定下一步行动:
-
-当前对话目标:{goal}
-实现该对话目标的方式:{method}
-产生该对话目标的原因:{reasoning}
-
-{decision_info_text}
-{action_history_text}
-
-最近的对话记录:
-{chat_history_text}
-
-请你接下去想想要你要做什么,可以发言,可以等待,可以倾听,可以调取知识。注意不同行动类型的要求,不要重复发言:
-行动类型:
-fetch_knowledge: 需要调取知识,当需要专业知识或特定信息时选择
-wait: 当你做出了发言,对方尚未回复时等待对方的回复
-listening: 倾听对方发言,当你认为对方发言尚未结束时采用
-direct_reply: 不符合上述情况,回复对方,注意不要过多或者重复发言
-rethink_goal: 重新思考对话目标,当发现对话目标不合适时选择,会重新思考对话目标
-judge_conversation: 判断对话是否结束,当发现对话目标已经达到或者希望停止对话时选择,会判断对话是否结束
-
-请以JSON格式输出,包含以下字段:
-1. action: 行动类型,注意你之前的行为
-2. reason: 选择该行动的原因,注意你之前的行为(简要解释)
-
-注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
-
- logger.debug(f"发送到LLM的提示词: {prompt}")
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"LLM原始返回内容: {content}")
-
- # 使用简化函数提取JSON内容
- success, result = get_items_from_json(
- content,
- "action", "reason",
- default_values={"action": "direct_reply", "reason": "默认原因"}
- )
-
- if not success:
- return "direct_reply", "JSON解析失败,选择直接回复"
-
- action = result["action"]
- reason = result["reason"]
-
- # 验证action类型
- if action not in ["direct_reply", "fetch_knowledge", "wait", "listening", "rethink_goal", "judge_conversation"]:
- logger.warning(f"未知的行动类型: {action},默认使用listening")
- action = "listening"
-
- logger.info(f"规划的行动: {action}")
- logger.info(f"行动原因: {reason}")
- return action, reason
-
- except Exception as e:
- logger.error(f"规划行动时出错: {str(e)}")
- return "direct_reply", "发生错误,选择直接回复"
-
-
class GoalAnalyzer:
"""对话目标分析器"""
@@ -548,136 +318,6 @@ class Waiter:
await asyncio.sleep(1)
logger.info("等待中...")
-
-class ReplyGenerator:
- """回复生成器"""
-
- def __init__(self, stream_id: str):
- self.llm = LLM_request(
- model=global_config.llm_normal,
- temperature=0.7,
- max_tokens=300,
- request_type="reply_generation"
- )
- self.personality_info = Individuality.get_instance().get_prompt(type = "personality", x_person = 2, level = 2)
- self.name = global_config.BOT_NICKNAME
- self.chat_observer = ChatObserver.get_instance(stream_id)
- self.reply_checker = ReplyChecker(stream_id)
-
- async def generate(
- self,
- goal: str,
- chat_history: List[Message],
- knowledge_cache: Dict[str, str],
- previous_reply: Optional[str] = None,
- retry_count: int = 0
- ) -> str:
- """生成回复
-
- Args:
- goal: 对话目标
- chat_history: 聊天历史
- knowledge_cache: 知识缓存
- previous_reply: 上一次生成的回复(如果有)
- retry_count: 当前重试次数
-
- Returns:
- str: 生成的回复
- """
- # 构建提示词
- logger.debug(f"开始生成回复:当前目标: {goal}")
- self.chat_observer.trigger_update() # 触发立即更新
- if not await self.chat_observer.wait_for_update():
- logger.warning("等待消息更新超时")
-
- messages = self.chat_observer.get_message_history(limit=20)
- chat_history_text = ""
- for msg in messages:
- time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
- user_info = UserInfo.from_dict(msg.get("user_info", {}))
- sender = user_info.user_nickname or f"用户{user_info.user_id}"
- if sender == self.name:
- sender = "你说"
- chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
-
- # 整理知识缓存
- knowledge_text = ""
- if knowledge_cache:
- knowledge_text = "\n相关知识:"
- if isinstance(knowledge_cache, dict):
- for _source, content in knowledge_cache.items():
- knowledge_text += f"\n{content}"
- elif isinstance(knowledge_cache, list):
- for item in knowledge_cache:
- knowledge_text += f"\n{item}"
-
- # 添加上一次生成的回复信息
- previous_reply_text = ""
- if previous_reply:
- previous_reply_text = f"\n上一次生成的回复(需要改进):\n{previous_reply}"
-
- personality_text = f"你的名字是{self.name},{self.personality_info}"
-
- prompt = f"""{personality_text}。现在你在参与一场QQ聊天,请根据以下信息生成回复:
-
-当前对话目标:{goal}
-{knowledge_text}
-{previous_reply_text}
-最近的聊天记录:
-{chat_history_text}
-
-请根据上述信息,以你的性格特征生成一个自然、得体的回复。回复应该:
-1. 符合对话目标,以"你"的角度发言
-2. 体现你的性格特征
-3. 自然流畅,像正常聊天一样,简短
-4. 适当利用相关知识,但不要生硬引用
-{'5. 改进上一次回复中的问题' if previous_reply else ''}
-
-请注意把握聊天内容,不要回复的太有条理,可以有个性。请分清"你"和对方说的话,不要把"你"说的话当做对方说的话,这是你自己说的话。
-请你回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
-请你注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
-不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。
-
-请直接输出回复内容,不需要任何额外格式。"""
-
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.info(f"生成的回复: {content}")
- # is_new = self.chat_observer.check()
- # logger.debug(f"再看一眼聊天记录,{'有' if is_new else '没有'}新消息")
-
- # 如果有新消息,重新生成回复
- # if is_new:
- # logger.info("检测到新消息,重新生成回复")
- # return await self.generate(
- # goal, chat_history, knowledge_cache,
- # None, retry_count
- # )
-
- return content
-
- except Exception as e:
- logger.error(f"生成回复时出错: {e}")
- return "抱歉,我现在有点混乱,让我重新思考一下..."
-
- async def check_reply(
- self,
- reply: str,
- goal: str,
- retry_count: int = 0
- ) -> Tuple[bool, str, bool]:
- """检查回复是否合适
-
- Args:
- reply: 生成的回复
- goal: 对话目标
- retry_count: 当前重试次数
-
- Returns:
- Tuple[bool, str, bool]: (是否合适, 原因, 是否需要重新规划)
- """
- return await self.reply_checker.check(reply, goal, retry_count)
-
class PFCNotificationHandler(NotificationHandler):
"""PFC的通知处理器"""
@@ -736,296 +376,6 @@ class PFCNotificationHandler(NotificationHandler):
self.logger.error(f"通知数据: {getattr(notification, 'data', None)}")
-class Conversation:
- # 类级别的实例管理
- _instances: Dict[str, 'Conversation'] = {}
- _instance_lock = asyncio.Lock()
- _init_events: Dict[str, asyncio.Event] = {}
- _initializing: Dict[str, bool] = {}
-
- @classmethod
- async def get_instance(cls, stream_id: str) -> Optional['Conversation']:
- """获取或创建对话实例
-
- Args:
- stream_id: 聊天流ID
-
- Returns:
- Optional[Conversation]: 对话实例,如果创建或等待失败则返回None
- """
- try:
- # 检查是否已经有实例
- if stream_id in cls._instances:
- return cls._instances[stream_id]
-
- async with cls._instance_lock:
- # 再次检查,防止在获取锁的过程中其他线程创建了实例
- if stream_id in cls._instances:
- return cls._instances[stream_id]
-
- # 如果正在初始化,等待初始化完成
- if stream_id in cls._initializing and cls._initializing[stream_id]:
- event = cls._init_events.get(stream_id)
- if event:
- try:
- # 在等待之前释放锁
- cls._instance_lock.release()
- await asyncio.wait_for(event.wait(), timeout=10.0) # 增加超时时间到10秒
- # 重新获取锁
- await cls._instance_lock.acquire()
- if stream_id in cls._instances:
- return cls._instances[stream_id]
- except asyncio.TimeoutError:
- logger.error(f"等待实例 {stream_id} 初始化超时")
- # 清理超时的初始化状态
- cls._initializing[stream_id] = False
- if stream_id in cls._init_events:
- del cls._init_events[stream_id]
- return None
-
- # 创建新实例
- logger.info(f"创建新的对话实例: {stream_id}")
- cls._initializing[stream_id] = True
- cls._init_events[stream_id] = asyncio.Event()
-
- # 在锁保护下创建实例
- instance = cls(stream_id)
- cls._instances[stream_id] = instance
-
- # 启动实例初始化(在后台运行)
- asyncio.create_task(instance._initialize())
-
- return instance
-
- except Exception as e:
- logger.error(f"获取对话实例失败: {e}")
- return None
-
- async def _initialize(self):
- """初始化实例(在后台运行)"""
- try:
- logger.info(f"开始初始化对话实例: {self.stream_id}")
-
- start_time = time.time()
- logger.info("启动观察器...")
- self.chat_observer.start() # 启动观察器
- logger.info(f"观察器启动完成,耗时: {time.time() - start_time:.2f}秒")
-
- await asyncio.sleep(1) # 给观察器一些启动时间
-
- # 获取初始目标
- logger.info("开始分析初始对话目标...")
- goal_start_time = time.time()
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
- logger.info(f"目标分析完成,耗时: {time.time() - goal_start_time:.2f}秒")
-
- # 标记初始化完成
- logger.info("标记初始化完成...")
- self.__class__._initializing[self.stream_id] = False
- if self.stream_id in self.__class__._init_events:
- self.__class__._init_events[self.stream_id].set()
-
- # 启动对话循环
- logger.info("启动对话循环...")
- asyncio.create_task(self._conversation_loop())
-
- total_time = time.time() - start_time
- logger.info(f"实例初始化完成,总耗时: {total_time:.2f}秒")
-
- except Exception as e:
- logger.error(f"初始化对话实例失败: {e}")
- # 清理失败的初始化
- self.__class__._initializing[self.stream_id] = False
- if self.stream_id in self.__class__._init_events:
- self.__class__._init_events[self.stream_id].set()
- if self.stream_id in self.__class__._instances:
- del self.__class__._instances[self.stream_id]
-
- async def start(self):
- """开始对话流程"""
- try:
- logger.info("对话系统启动")
- self.should_continue = True
- await self._conversation_loop()
- except Exception as e:
- logger.error(f"启动对话系统失败: {e}")
- raise
-
- async def _conversation_loop(self):
- """对话循环"""
- # 获取最近的消息历史
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
-
- while self.should_continue:
- # 执行行动
- self.chat_observer.trigger_update() # 触发立即更新
- if not await self.chat_observer.wait_for_update():
- logger.warning("等待消息更新超时")
-
- # 使用决策信息来辅助行动规划
- action, reason = await self.action_planner.plan(
- self.current_goal,
- self.current_method,
- self.goal_reasoning,
- self.action_history,
- self.decision_info # 传入决策信息
- )
-
- # 执行行动
- await self._handle_action(action, reason)
-
- # 清理已处理的消息
- self.decision_info.clear_unprocessed_messages()
-
- def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
- """将消息字典转换为Message对象"""
- try:
- chat_info = msg_dict.get("chat_info", {})
- chat_stream = ChatStream.from_dict(chat_info)
- user_info = UserInfo.from_dict(msg_dict.get("user_info", {}))
-
- return Message(
- message_id=msg_dict["message_id"],
- chat_stream=chat_stream,
- time=msg_dict["time"],
- user_info=user_info,
- processed_plain_text=msg_dict.get("processed_plain_text", ""),
- detailed_plain_text=msg_dict.get("detailed_plain_text", "")
- )
- except Exception as e:
- logger.warning(f"转换消息时出错: {e}")
- raise
-
- async def _handle_action(self, action: str, reason: str):
- """处理规划的行动"""
- logger.info(f"执行行动: {action}, 原因: {reason}")
-
- # 记录action历史
- self.action_history.append({
- "action": action,
- "reason": reason,
- "time": datetime.datetime.now().strftime("%H:%M:%S")
- })
-
- # 只保留最近的10条记录
- if len(self.action_history) > 10:
- self.action_history = self.action_history[-10:]
-
- if action == "direct_reply":
- self.state = ConversationState.GENERATING
- messages = self.chat_observer.get_message_history(limit=30)
- self.generated_reply = await self.reply_generator.generate(
- self.current_goal,
- self.current_method,
- [self._convert_to_message(msg) for msg in messages],
- self.knowledge_cache
- )
-
- # 检查回复是否合适
- is_suitable, reason, need_replan = await self.reply_generator.check_reply(
- self.generated_reply,
- self.current_goal
- )
-
- await self._send_reply()
-
- elif action == "fetch_knowledge":
- self.state = ConversationState.GENERATING
- messages = self.chat_observer.get_message_history(limit=30)
- knowledge, sources = await self.knowledge_fetcher.fetch(
- self.current_goal,
- [self._convert_to_message(msg) for msg in messages]
- )
- logger.info(f"获取到知识,来源: {sources}")
-
- if knowledge != "未找到相关知识":
- self.knowledge_cache[sources] = knowledge
-
- elif action == "rethink_goal":
- self.state = ConversationState.RETHINKING
- self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
-
- elif action == "judge_conversation":
- self.state = ConversationState.JUDGING
- self.goal_achieved, self.stop_conversation, self.reason = await self.goal_analyzer.analyze_conversation(self.current_goal, self.goal_reasoning)
-
- # 如果当前目标达成但还有其他目标
- if self.goal_achieved and not self.stop_conversation:
- alternative_goals = await self.goal_analyzer.get_alternative_goals()
- if alternative_goals:
- # 切换到下一个目标
- self.current_goal, self.current_method, self.goal_reasoning = alternative_goals[0]
- logger.info(f"当前目标已达成,切换到新目标: {self.current_goal}")
- return
-
- if self.stop_conversation:
- await self._stop_conversation()
-
- elif action == "listening":
- self.state = ConversationState.LISTENING
- logger.info("倾听对方发言...")
- if await self.waiter.wait(): # 如果返回True表示超时
- await self._send_timeout_message()
- await self._stop_conversation()
-
- else: # wait
- self.state = ConversationState.WAITING
- logger.info("等待更多信息...")
- if await self.waiter.wait(): # 如果返回True表示超时
- await self._send_timeout_message()
- await self._stop_conversation()
-
- async def _stop_conversation(self):
- """完全停止对话"""
- logger.info("停止对话")
- self.should_continue = False
- self.state = ConversationState.ENDED
- # 删除实例(这会同时停止chat_observer)
- await self.remove_instance(self.stream_id)
-
- async def _send_timeout_message(self):
- """发送超时结束消息"""
- try:
- messages = self.chat_observer.get_message_history(limit=1)
- if not messages:
- return
-
- latest_message = self._convert_to_message(messages[0])
- await self.direct_sender.send_message(
- chat_stream=self.chat_stream,
- content="抱歉,由于等待时间过长,我需要先去忙别的了。下次再聊吧~",
- reply_to_message=latest_message
- )
- except Exception as e:
- logger.error(f"发送超时消息失败: {str(e)}")
-
- async def _send_reply(self):
- """发送回复"""
- if not self.generated_reply:
- logger.warning("没有生成回复")
- return
-
- messages = self.chat_observer.get_message_history(limit=1)
- if not messages:
- logger.warning("没有最近的消息可以回复")
- return
-
- latest_message = self._convert_to_message(messages[0])
- try:
- await self.direct_sender.send_message(
- chat_stream=self.chat_stream,
- content=self.generated_reply,
- reply_to_message=latest_message
- )
- self.chat_observer.trigger_update() # 触发立即更新
- if not await self.chat_observer.wait_for_update():
- logger.warning("等待消息更新超时")
-
- self.state = ConversationState.ANALYZING
- except Exception as e:
- logger.error(f"发送消息失败: {str(e)}")
- self.state = ConversationState.ANALYZING
-
class DirectMessageSender:
"""直接发送消息到平台的发送器"""
diff --git a/src/plugins/PFC/pfc_manager.py b/src/plugins/PFC/pfc_manager.py
new file mode 100644
index 000000000..7e5f4cdb4
--- /dev/null
+++ b/src/plugins/PFC/pfc_manager.py
@@ -0,0 +1,97 @@
+from typing import Dict, Optional
+from src.common.logger import get_module_logger
+from .pfc import Conversation
+import traceback
+
+logger = get_module_logger("pfc_manager")
+
+class PFCManager:
+ """PFC对话管理器,负责管理所有对话实例"""
+
+ # 单例模式
+ _instance = None
+
+ # 会话实例管理
+ _instances: Dict[str, Conversation] = {}
+ _initializing: Dict[str, bool] = {}
+
+ @classmethod
+ def get_instance(cls) -> 'PFCManager':
+ """获取管理器单例
+
+ Returns:
+ PFCManager: 管理器实例
+ """
+ if cls._instance is None:
+ cls._instance = PFCManager()
+ return cls._instance
+
+ async def get_or_create_conversation(self, stream_id: str) -> Optional[Conversation]:
+ """获取或创建对话实例
+
+ Args:
+ stream_id: 聊天流ID
+
+ Returns:
+ Optional[Conversation]: 对话实例,创建失败则返回None
+ """
+        # 如果该会话正在初始化中,先不重复创建,等待初始化完成
+ if stream_id in self._initializing and self._initializing[stream_id]:
+ logger.debug(f"会话实例正在初始化中: {stream_id}")
+ return None
+
+ if stream_id in self._instances:
+ logger.debug(f"使用现有会话实例: {stream_id}")
+ return self._instances[stream_id]
+
+ try:
+ # 创建新实例
+ logger.info(f"创建新的对话实例: {stream_id}")
+ self._initializing[stream_id] = True
+ # 创建实例
+ conversation_instance = Conversation(stream_id)
+ self._instances[stream_id] = conversation_instance
+
+ # 启动实例初始化
+ await self._initialize_conversation(conversation_instance)
+ except Exception as e:
+ logger.error(f"创建会话实例失败: {stream_id}, 错误: {e}")
+ return None
+
+ return conversation_instance
+
+
+ async def _initialize_conversation(self, conversation: Conversation):
+ """初始化会话实例
+
+ Args:
+ conversation: 要初始化的会话实例
+ """
+ stream_id = conversation.stream_id
+
+ try:
+ logger.info(f"开始初始化会话实例: {stream_id}")
+ # 启动初始化流程
+ await conversation._initialize()
+
+ # 标记初始化完成
+ self._initializing[stream_id] = False
+
+ logger.info(f"会话实例 {stream_id} 初始化完成")
+
+ except Exception as e:
+ logger.error(f"管理器初始化会话实例失败: {stream_id}, 错误: {e}")
+ logger.error(traceback.format_exc())
+            # 清理失败的初始化状态,避免该会话被永久标记为初始化中
+            self._initializing[stream_id] = False
+
+ async def get_conversation(self, stream_id: str) -> Optional[Conversation]:
+ """获取已存在的会话实例
+
+ Args:
+ stream_id: 聊天流ID
+
+ Returns:
+ Optional[Conversation]: 会话实例,不存在则返回None
+ """
+ return self._instances.get(stream_id)
\ No newline at end of file
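How `PFCManager` is expected to be driven from the message path (a sketch mirroring the `bot.py` change below; the handler name here is hypothetical):

```python
from src.plugins.PFC.pfc_manager import PFCManager  # assumed module path

async def handle_private_message(stream_id: str) -> None:
    manager = PFCManager.get_instance()  # process-wide singleton
    conversation = await manager.get_or_create_conversation(stream_id)
    if conversation is None:
        # Either still initializing or creation failed; the next incoming
        # message will simply try again.
        return
    # Nothing else to do here: Conversation._initialize() already scheduled
    # the conversation loop via asyncio.create_task(self.start()).
```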
diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py
new file mode 100644
index 000000000..70be6eebc
--- /dev/null
+++ b/src/plugins/PFC/reply_generator.py
@@ -0,0 +1,142 @@
+import datetime
+from typing import List, Optional, Dict, Tuple
+from src.common.logger import get_module_logger
+from ..message.message_base import UserInfo
+from ..chat.message import Message
+from ..models.utils_model import LLM_request
+from ..config.config import global_config
+from .chat_observer import ChatObserver
+from .reply_checker import ReplyChecker
+from src.individuality.individuality import Individuality
+
+logger = get_module_logger("reply_generator")
+
+
+class ReplyGenerator:
+ """回复生成器"""
+
+ def __init__(self, stream_id: str):
+ self.llm = LLM_request(
+ model=global_config.llm_normal,
+ temperature=0.7,
+ max_tokens=300,
+ request_type="reply_generation"
+ )
+ self.personality_info = Individuality.get_instance().get_prompt(type = "personality", x_person = 2, level = 2)
+ self.name = global_config.BOT_NICKNAME
+ self.chat_observer = ChatObserver.get_instance(stream_id)
+ self.reply_checker = ReplyChecker(stream_id)
+
+ async def generate(
+ self,
+ goal: str,
+ chat_history: List[Message],
+ knowledge_cache: Dict[str, str],
+ previous_reply: Optional[str] = None,
+ retry_count: int = 0
+ ) -> str:
+ """生成回复
+
+ Args:
+ goal: 对话目标
+ chat_history: 聊天历史
+ knowledge_cache: 知识缓存
+ previous_reply: 上一次生成的回复(如果有)
+ retry_count: 当前重试次数
+
+ Returns:
+ str: 生成的回复
+ """
+ # 构建提示词
+ logger.debug(f"开始生成回复:当前目标: {goal}")
+ self.chat_observer.trigger_update() # 触发立即更新
+ if not await self.chat_observer.wait_for_update():
+ logger.warning("等待消息更新超时")
+
+ messages = self.chat_observer.get_message_history(limit=20)
+ chat_history_text = ""
+ for msg in messages:
+ time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
+ user_info = UserInfo.from_dict(msg.get("user_info", {}))
+ sender = user_info.user_nickname or f"用户{user_info.user_id}"
+ if sender == self.name:
+ sender = "你说"
+ chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
+
+ # 整理知识缓存
+ knowledge_text = ""
+ if knowledge_cache:
+ knowledge_text = "\n相关知识:"
+ if isinstance(knowledge_cache, dict):
+ for _source, content in knowledge_cache.items():
+ knowledge_text += f"\n{content}"
+ elif isinstance(knowledge_cache, list):
+ for item in knowledge_cache:
+ knowledge_text += f"\n{item}"
+
+ # 添加上一次生成的回复信息
+ previous_reply_text = ""
+ if previous_reply:
+ previous_reply_text = f"\n上一次生成的回复(需要改进):\n{previous_reply}"
+
+ personality_text = f"你的名字是{self.name},{self.personality_info}"
+
+ prompt = f"""{personality_text}。现在你在参与一场QQ聊天,请根据以下信息生成回复:
+
+当前对话目标:{goal}
+{knowledge_text}
+{previous_reply_text}
+最近的聊天记录:
+{chat_history_text}
+
+请根据上述信息,以你的性格特征生成一个自然、得体的回复。回复应该:
+1. 符合对话目标,以"你"的角度发言
+2. 体现你的性格特征
+3. 自然流畅,像正常聊天一样,简短
+4. 适当利用相关知识,但不要生硬引用
+{'5. 改进上一次回复中的问题' if previous_reply else ''}
+
+请注意把握聊天内容,不要回复的太有条理,可以有个性。请分清"你"和对方说的话,不要把"你"说的话当做对方说的话,这是你自己说的话。
+请你回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
+请你注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
+不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。
+
+请直接输出回复内容,不需要任何额外格式。"""
+
+ try:
+ content, _ = await self.llm.generate_response_async(prompt)
+ logger.info(f"生成的回复: {content}")
+ # is_new = self.chat_observer.check()
+ # logger.debug(f"再看一眼聊天记录,{'有' if is_new else '没有'}新消息")
+
+ # 如果有新消息,重新生成回复
+ # if is_new:
+ # logger.info("检测到新消息,重新生成回复")
+ # return await self.generate(
+ # goal, chat_history, knowledge_cache,
+ # None, retry_count
+ # )
+
+ return content
+
+ except Exception as e:
+ logger.error(f"生成回复时出错: {e}")
+ return "抱歉,我现在有点混乱,让我重新思考一下..."
+
+ async def check_reply(
+ self,
+ reply: str,
+ goal: str,
+ retry_count: int = 0
+ ) -> Tuple[bool, str, bool]:
+ """检查回复是否合适
+
+ Args:
+ reply: 生成的回复
+ goal: 对话目标
+ retry_count: 当前重试次数
+
+ Returns:
+ Tuple[bool, str, bool]: (是否合适, 原因, 是否需要重新规划)
+ """
+ return await self.reply_checker.check(reply, goal, retry_count)
\ No newline at end of file
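A sketch of how `generate()` and `check_reply()` are meant to be chained (illustrative; the function and variable names are assumptions, and the retry policy shown is just one possible use of the returned tuple):

```python
async def produce_reply(reply_generator, goal, chat_history, knowledge_cache):
    # One generate/check round using the interfaces defined above.
    reply = await reply_generator.generate(goal, chat_history, knowledge_cache)
    is_suitable, reason, need_replan = await reply_generator.check_reply(reply, goal)

    if need_replan:
        return None  # hand control back to the action planner
    if not is_suitable:
        # Ask for an improved reply, passing the rejected one as previous_reply
        reply = await reply_generator.generate(
            goal, chat_history, knowledge_cache,
            previous_reply=reply, retry_count=1,
        )
    return reply
```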
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 2953fa6ce..119d1aa01 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -1,14 +1,13 @@
from ..moods.moods import MoodManager # 导入情绪管理器
from ..config.config import global_config
from .message import MessageRecv
-from ..PFC.pfc import Conversation, ConversationState
+from ..PFC.pfc_manager import PFCManager
from .chat_stream import chat_manager
from ..chat_module.only_process.only_message_process import MessageProcessor
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ..chat_module.think_flow_chat.think_flow_chat import ThinkFlowChat
from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat
-import asyncio
import traceback
# 定义日志配置
@@ -31,10 +30,15 @@ class ChatBot:
self.think_flow_chat = ThinkFlowChat()
self.reasoning_chat = ReasoningChat()
self.only_process_chat = MessageProcessor()
+
+        # 获取PFC管理器单例,用于创建和管理各聊天流的对话实例
+ self.pfc_manager = PFCManager.get_instance()
async def _ensure_started(self):
"""确保所有任务已启动"""
if not self._started:
+ logger.info("确保ChatBot所有任务已启动")
+
self._started = True
async def _create_PFC_chat(self, message: MessageRecv):
@@ -42,27 +46,11 @@ class ChatBot:
chat_id = str(message.chat_stream.stream_id)
if global_config.enable_pfc_chatting:
- # 获取或创建对话实例
- conversation = await Conversation.get_instance(chat_id)
- if conversation is None:
- logger.error(f"创建或获取对话实例失败: {chat_id}")
- return
-
- # 如果是新创建的实例,启动对话系统
- if conversation.state == ConversationState.INIT:
- asyncio.create_task(conversation.start())
- logger.info(f"为聊天 {chat_id} 创建新的对话实例")
- elif conversation.state == ConversationState.ENDED:
- # 如果实例已经结束,重新创建
- await Conversation.remove_instance(chat_id)
- conversation = await Conversation.get_instance(chat_id)
- if conversation is None:
- logger.error(f"重新创建对话实例失败: {chat_id}")
- return
- asyncio.create_task(conversation.start())
- logger.info(f"为聊天 {chat_id} 重新创建对话实例")
+
+ await self.pfc_manager.get_or_create_conversation(chat_id)
+
except Exception as e:
- logger.error(f"创建PFC聊天流失败: {e}")
+ logger.error(f"创建PFC聊天失败: {e}")
async def message_process(self, message_data: str) -> None:
"""处理转化后的统一格式消息
@@ -90,6 +78,9 @@ class ChatBot:
- 性能计时
"""
try:
+ # 确保所有任务已启动
+ await self._ensure_started()
+
message = MessageRecv(message_data)
groupinfo = message.message_info.group_info
userinfo = message.message_info.user_info
From 8d12341c451c3d94c3ce98438bc0bdd347fd307b Mon Sep 17 00:00:00 2001
From: DrSmoothl <1787882683@qq.com>
Date: Tue, 8 Apr 2025 22:01:10 +0800
Subject: [PATCH 05/13] =?UTF-8?q?=E7=BE=8E=E5=8C=96README=E6=96=87?=
=?UTF-8?q?=E4=BB=B6=EF=BC=8C=E5=A2=9E=E5=8A=A0=E8=A2=AB=E7=A7=BB=E9=99=A4?=
=?UTF-8?q?=E7=9A=84=E6=BC=94=E7=A4=BA=E8=A7=86=E9=A2=91=E5=B0=81=E9=9D=A2?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 155 ++++++++++++++++++++++------------------
depends-data/maimai.png | Bin 0 -> 465565 bytes
depends-data/video.png | Bin 0 -> 63149 bytes
3 files changed, 86 insertions(+), 69 deletions(-)
create mode 100644 depends-data/maimai.png
create mode 100644 depends-data/video.png
diff --git a/README.md b/README.md
index fa97fec14..95fcf006c 100644
--- a/README.md
+++ b/README.md
@@ -1,24 +1,62 @@
# 麦麦!MaiCore-MaiMBot (编辑中)
+
+
+
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+
+
+
+
+
+
+
MaiBot(麦麦)
+
+ 一款专注于 群组聊天 的赛博网友
+
+ 探索本项目的文档 »
+
+
+ 查看Demo
+ ·
+ 报告Bug
+ ·
+ 提出新特性
+
+
+
## 新版0.6.0部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
-
-
-
-
-
-
-
## 📝 项目简介
**🍔MaiCore是一个基于大语言模型的可交互智能体**
-- LLM 提供对话能力
-- 动态Prompt构建器
-- 实时的思维系统
-- MongoDB 提供数据持久化支持
-- 可扩展,可支持多种平台和多种功能
+
+- 💭 **智能对话系统**:基于LLM的自然语言交互
+- 🤔 **实时思维系统**:模拟人类思考过程
+- 💝 **情感表达系统**:丰富的表情包和情绪表达
+- 🧠 **持久记忆系统**:基于MongoDB的长期记忆存储
+- 🔄 **动态人格系统**:自适应的性格特征
+
+
+
+
+### 📢 版本信息
**最新版本: v0.6.0** ([查看更新日志](changelogs/changelog.md))
> [!WARNING]
@@ -28,19 +66,12 @@
> 次版本MaiBot将基于MaiCore运行,不再依赖于nonebot相关组件运行。
> MaiBot将通过nonebot的插件与nonebot建立联系,然后nonebot与QQ建立联系,实现MaiBot与QQ的交互
-**分支介绍:**
-- main 稳定版本
-- dev 开发版(不知道什么意思就别下)
-- classical 0.6.0以前的版本
+**分支说明:**
+- `main`: 稳定发布版本
+- `dev`: 开发测试版本(不知道什么意思就别下)
+- `classical`: 0.6.0之前的版本
-
> [!WARNING]
> - 项目处于活跃开发阶段,代码可能随时更改
@@ -49,6 +80,12 @@
> - 由于持续迭代,可能存在一些已知或未知的bug
> - 由于开发中,可能消耗较多token
+### ⚠️ 重要提示
+
+- 升级到v0.6.0版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
+- 本版本基于MaiCore重构,通过nonebot插件与QQ平台交互
+- 项目处于活跃开发阶段,功能和API可能随时调整
+
### 💬交流群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
- [五群](https://qm.qq.com/q/JxvHZnxyec) 1022489779
- [一群](https://qm.qq.com/q/VQ3XZrWgMs) 766798517 【已满】
@@ -72,55 +109,35 @@
## 🎯 功能介绍
-### 💬 聊天功能
-- 提供思维流(心流)聊天和推理聊天两种对话逻辑
-- 支持关键词检索主动发言:对消息的话题topic进行识别,如果检测到麦麦存储过的话题就会主动进行发言
-- 支持bot名字呼唤发言:检测到"麦麦"会主动发言,可配置
-- 支持多模型,多厂商自定义配置
-- 动态的prompt构建器,更拟人
-- 支持图片,转发消息,回复消息的识别
-- 支持私聊功能,可使用PFC模式的有目的多轮对话(实验性)
+| 模块 | 主要功能 | 特点 |
+|------|---------|------|
+| 💬 聊天系统 | • 思维流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 |
+| 🧠 思维流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动 | 智能化决策 |
+| 🧠 记忆系统 2.0 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
+| 😊 表情包系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
+| 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 |
+| 👥 关系系统 2.0 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
+| 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 |
+| 🔧 系统功能 | • 优雅关闭机制<br>• 自动数据保存<br>• 异常处理完善 | 稳定可靠 |
-### 🧠 思维流系统
-- 思维流能够在回复前后进行思考,生成实时想法
-- 思维流自动启停机制,提升资源利用效率
-- 思维流与日程系统联动,实现动态日程生成
+## 📐 项目架构
-### 🧠 记忆系统 2.0
-- 优化记忆抽取策略和prompt结构
-- 改进海马体记忆提取机制,提升自然度
-- 对聊天记录进行概括存储,在需要时调用
+```mermaid
+graph TD
+ A[MaiCore] --> B[对话系统]
+ A --> C[思维流系统]
+ A --> D[记忆系统]
+ A --> E[情感系统]
+ B --> F[多模型支持]
+ B --> G[动态Prompt]
+ C --> H[实时思考]
+ C --> I[日程联动]
+ D --> J[记忆存储]
+ D --> K[记忆检索]
+ E --> L[表情管理]
+ E --> M[情绪识别]
+```
-### 😊 表情包系统
-- 支持根据发言内容发送对应情绪的表情包
-- 支持识别和处理gif表情包
-- 会自动偷群友的表情包
-- 表情包审查功能
-- 表情包文件完整性自动检查
-- 自动清理缓存图片
-
-### 📅 日程系统
-- 动态更新的日程生成
-- 可自定义想象力程度
-- 与聊天情况交互(思维流模式下)
-
-### 👥 关系系统 2.0
-- 优化关系管理系统,适用于新版本
-- 提供更丰富的关系接口
-- 针对每个用户创建"关系",实现个性化回复
-
-### 📊 统计系统
-- 详细的使用数据统计
-- LLM调用统计
-- 在控制台显示统计信息
-
-### 🔧 系统功能
-- 支持优雅的shutdown机制
-- 自动保存功能,定期保存聊天记录和关系数据
-- 完善的异常处理机制
-- 可自定义时区设置
-- 优化的日志输出格式
-- 配置自动更新功能
## 开发计划TODO:LIST
diff --git a/depends-data/maimai.png b/depends-data/maimai.png
new file mode 100644
index 0000000000000000000000000000000000000000..faccb856b925eba35bec09996f4a89317e422a33
GIT binary patch
literal 465565
zEeF`xKkd?xhF^Y4X8&oC^U6*R-Mb}j+~j9ODVQrh)qR<8=KIt(lE>ZL!>(HSdCU=!
zxm;_jn^&*t{nE)#T3>zEW|ZznN2q1ra?jv)?4JJ@E5WPY_pR*vyc7gAJ!VB
zye^i~iSc=a?Xr%=CD8iE9}ix2bd^=>03aB+F27hl<@nw3Hfk*p##lxUfuoPO6o^p5
z++oS->5QP%Q;v0ioN!Sb*58W)7m>BYl9EXWB_zDo5RJA5&F$U>0S*i$WrzUom{G7O
zvFgOr;rR;>cB9auVUiB*anmkop;`jSV&7qA#|yGK!oXcN(xY$$0U6jni3EW(t10W%
zb#FamE+=b|VcYGSzs9XuP)N`Kz)3`u?Yl*ZACYjOWvu^sY~O#kYl{v^{kMln<*|VFN@NM|a7h;RrKF-e;iRnbUWOk7qeZLp$1(@f
z6I>RS-75#$3YUwMA;>*1O%9SZ^M5|56vF$Xy&1K+1`Y_EjZg?{_I>vF7&nP+pO|9O
z8$Vb<59YH1EDK+URD?(K!Uun*e-DHT6V5+T3P}+=FVDiOZyT4q#pgxX@uT9$BToQW
z0%aF|!qE2@JLg5a7yEO?;r7M_fyFfF!xmC1`o262ek{zSLKl5QcK9?amh9G$?aU}t
z$~tpWJ8hD{1M;swBTY0esUx!1T)ih--xaA#@$w?_!2FWE)ni3@*RtE_xb3>~7W%hY
zk+O#56N&@*dgqW5UtAPPYyzAiurM1^K8uLdQ`-jxc{QhpDatXlt;1;>-?oqk7c+OG
zVvZcd;y$w=MR^T(pEjx2xJiYX(3VzDDz}7>_U&isH+})!P0r=GpZjPk37Bx?MP6JK
zq#lvm+E4QH^J`<6G6+k83Qp`5je-3wHeP$~y=`wdl#vUx+m`anaK8?1&VMTEtWAx9
z3(B|qQt0|KX#JcPf6mj+HbIpAy{)%gGG(0ycqL
znE+ycz|k8;g*h*pWQcdjP|J%|Qd&la+@3<}Hw*goipNdgtqEEg)gTPQ`0U>-l{0a-
zkLhp%avyG}k-8q4F$*+~xzz*cf&xXkL~wQ?iz
z>H-HAR#vJ8S<^ZB^JsGR<1|~Ux#b4Gbq)e(Z?F`AzJ5dR1?o#~2yoS#$c-v@8fWET
zqW<>dU3^~=S^px1bO_iT=m`)b^jtTUmFk6(ncAdKzoM(RnXAubL7K3Eb%e+;DNWYJ
zR%NTG$wrnB5yzr!<|N!N00wBh@3A)7JR8ZSi5s~Dp?HoBJp};4Q~yE3@8aE3tyr?t
zIOf8xui|W#cVr(S7@Ul2(b;B3tfIE$CO1n2mBUR8G^D5
z`!r;`@ZTB~x1Iv{xzHP9+-%8Ho8_pqvTfeGd57->ucuflmPgZ4RTpu%0Si&VdH+w4
zm8{ycx2+O0UtHVxjH%tUKzg00bkbInuO{b{aEE#UQU4gVYZP1s)!n!IW&eFt(M3rHm8VmGaM}0(zdGt%^_IQkT719>dh?}9&W#O4D6O~D+;ERzO%tw=%;k&
z!X2;ahj3i-8JSOx3AYK}Qu{6CraE&&MvLxos>-YEc93!6c%=XQJPD-}|M462kTcli
zBMh)k6o3j0WyQE?Xh8O-3=?>bWUi?;y>))F_;3`dwV2d@?hJZ}#NF0?aLN0zRtp=(IYz!L!z>wy$SEJO0hKSABAczGZ1wku_)
zq`vdKO17l@n%?S_t)b|T8F_^IrSoqm%8lB{Eq0NE`1ldj4gDW{q?sA9y2*`J=cu#$
zm)n!r#;orDB?(G9*z*c#F_<27CNs!3%_d)JH)d@9#e{(xDk|KY|L+*(d@%K3kV_uJ
zkq}Tlu7I9jgXUWiOilU+8~<%MbR+X64Ky#973J%+2rtL4RgmG?!evKq-!T4-CQ@<-4mu2jE!pj^=?kzBvu?x_n
z8kFt`lByT@_y8sH3-|W<|Yuk_W#qFu?eon=V<72UUE@8iQ&T(7K`ao5=pUq}(`|
zr0Sfr#w=a3?P$o42~D%ql!J{FZ2rt@`cRguZ_nZOz&{wFAtaY-w6$v9%!`~5i#HVF9AbG?lQOhvrKUa^_GKgReoYVTMWK}i
zoQDM!t7#JROxByN_7%35)h1hM+^EL+N=!A_!7C{Ez{%my_T;cGj~%y(Hd^A`2#DiR
z``^GjSv*kv;RyMxI5&C;6q=n5C)=coviKsyeeNbvOrS;IeD{iXbBkcFRO)GI4>Jl-
zGiFWBy%q1%i6wi>b*x2wvc+jg=H&Q&m#zaDY+`!y%Es`T&8*(dgvaMzZ2MA6v1MXF(IKuG%OTWH~3Pd
zS>D&oxi;BXT?TWw8^xw?r5Zl}{++8dq$qXKN94r)wWzDqebi`Ub1EtX9rq-hx%xaB
zyQa~*7n?#Mr-5j@=R!ul7{Gj4F#$yiu-!ASLMy8aTnh5s4DpkTXXKtY2NR_v$m
zaIHNj-De=IoIili>yXR#gZo3bdb9BaZ%67p(%8^oYlPUD$$}QoJpEg7&%t+Jq@@8G
zGQc)Y2HyY9`+i^cLuCm*vH>t-8O=q>9c%Yj?Ve{mOK+pSqkB5)%eYu*?u(lS!(;qr
zrT)T9!D+kieROiDyr7yGOo&H30FOg2exdD1@f`iKWyC)i)RYr7y6umR9&}ufyQ&Q@
zgv46Jfx{~f$67M3{_75bpZ2u5t1e=hHFgoe=8`OIWetd4LqrzN;QCw`GF>FHTyuR%
zj-mPjHpG@l?sxqSQVx4thSP`Dw_jJ&S4o(dP;f;!S;P(h4;nR0cz`0o!)UcWX7D_J
zjPz^=-Kejc#d`TB8ri0O`^U(fCl;5yfND^S_ps3gm!njr&>_yj69lUFzpUve+
zBP^F5{v~C1W2Gr>1%RJmc7;a`FaQ_%x<10e+hZJOtE?L(m
zBNt~vBUk18)TVff+i
z;-J7ZLW=m5Md(rPM=ufN5dQp#bD_EPvP?c(*q3eadBS{+{u7*$e`)30U`|kxKUc2o
zej;>3q|QK}3kxkU55V5=r!+h1akA26ZM=d*BV!#arI0Z~%&iXXi`Q&ds&19Xgqz2e
zn`BBMXURbTOa2L&f>#atqpGm3`F{)HtQ;I(!2qs^#S=bRQ2P3_eYpqSsdxXh9$f&(
zZ6e{-pNfIKnynJ$t|?oHCUR+Fo6v^1&TFJ?LBKRZJ!Q)$kCB(ODoj|iDdrc#JMOAw
zou9}ik`g1ryKz>I9S1g~y*AgWf|=7%DMw6H-+av#x7hAhnQ(JsmfG%LA8?@6d^i8v
z7lB$VkdnxIFd=@qp^PEna8kyWo{%fXR0#YIo45rfwX$ER)k)(vZ6&m6HOH!6yX54eNjtV;R(i`Y|4C2>OP6X^5awbqcpwY*oUHv!FEhVM61~tJWRP=gw|JVEhCm
zD0DA*59elV`#PvYY?KY#Ej$mIo+oT>NZG3Nbol@005HLQg9BR)b8m%2gprkDvB3HF
zr~~i}#7{QmWdwIH-|^;l=&ALXZ%S9@#R>CwT7c
z<4++&lT%bAqs_E*^6=v)@Lf1sV}a;e75(|}6SN&i?Bdei^i143{8+T`JH4t%MP?=$
z_GHroHBt>`YKFjlx7dJEhbj#Fit+Tn_;W}wD!4VVRH7KxA4F@YrV6TSU0Yil=_|!>
zg*K)?aHk5o{n!FsG*GuXfrlYMb-qf+i(;j4Jlh1koBW3#
zYmX3{$y2)Jhvk39A5YgcRx?cGUk&XK=I)obi8mnL)o7H&$w~*Govzx
zu7CNxqrtqs?a_>r=C7~k3x;{?+6pL1`SS!;6v9k@=;%>WpKxV{s93ka2Oul6vamP-
zgQmdW+xB;T#_l#t>^U2drW+pt-Poiw$ar$2nIg8@`17p1iYAs1xd~#TiAhK(8Hw|)IhPnqa(GsYV+XgzgvuMC;e+9?L{xeNg1}{*p2d;e(h4M
zF``!Zzw>t3kwsN9$4HkCKbBm^(T#5jb7<1HmdGQnC9hRQE`Uv2h5p%STuqjwdSyHD
z^#QI;_{KL(I~InI%&4YZU3ug9Y<;YZz3=k#@gOaq3=u9;#>jkW!p$@@Z<-|NIK`$1
z#3jFwD-DJ(~RDhQ6xxwEmC#y&FEtj^e4_AWwX!isif4$uciZwueOU?yrq
zAR?&z{^FtW0Oj>z88(!h*t42723r+OtuL8T1Oc;G5Miz*)uFpv13wEgeAK{S@0IM6
zM4~^eV>l7eaS(M!9GG_Ej)a)@+|aIRSB`MeIi?;=DaR(Mp7~{xIP8MNOxn(Zp!RfVjVV_FBJ!cp}DZ!$pE>-vA>P4kq
zmbv)|^Go_%Xl!e2KTUPG&$~Rmx7ABW_K3mCalyr~dtZKe>i^P!Hx}XN&efml!&=#A
z(p96cI@%~ZTCcnxA>4cssobs<)ySQc_`30VzEt-?98C9eDE1|knJp^N9;lW1(rGwV
z9C}cbK8lOOuEbz~;bXO+EF}MUKHEJ`a%5~^n(%CjPb;1Z=lW_=@IW^+eb&TAo^v&n
z!E@w|^S?d3WU*ZcEwtsEO;D
z;O>%h{In}2ECQ=8ujV1L+ttB~8~l08DJEt{{T9}H+^_rg5v@|(O$m>#UA(xfH^;F(
zSK<>al{ogRwKvw8#h;=s^`VNe)JU9vLCpI=W1Bms@vQCzf#TnGX)rkp3&6*!ofHYKc=6g$^%&R_atAVEW;oS01cLlK&!pER6qNR+Uv
z2Kx@1JN7(r>-YMyUa@*(E~Fn!wt&+<%WX0q3*uR3r9p`HoD`+*zQ(@*+nh#amlOOC2T+
z5P4x}aV-4L;45@??BW)T+MH6{VaGhEP9f26@H0zoU!ZgR0cSBrlE6%BUF*zun#%vC
z6@$2Ggr1n_u`hVx;D@Fz{lKK2%oKijJCg97-Q6mMJygOB2vFfLV2^X<^0kbgxRL0X$V@d?dxdCx@X%IB$U@%rzNdmKEMW|h4RVW1(mWEjb=
z8q1nY-6DE-zG0!$+>o7}eF=pW;x9#BD+@}mZ;EbJ20-|VF@8=N(&&8-p`^(#vKv_<
zciHr2NP4DMaSdF`V7R@oP4N#QBXhrY8y6dlaMhSci`Ns{4}I&RuaW#=B%X#m@#zEt
z$!vAY%+-1KngqWY-A)%cs&kLdujfPiLzp5SFf9^hu@nsHvYi!~d^T1B-)i-=5AdOf@DOsD
zoE+<)l|%iFK>;;I2+Y=gXysejf^n}QDH)JSe_RgdYqH+SCv{fb)-APE{)UpUaA?Cw
zzfIg8G3hlMNe4?Q^D?D}q3lb;e^UOM*7cvJB{vBhEA!HwJ?pAdn5gP>BKqflEv#(I7$%9+v)}D=6S-A}(`|W)X4_-lzf&A;63jk?Wdi8`@X6C1)g-=>Z8*3*;Lpeo
z1>9+Q&KBnqq$g-+h~D9W#J{D1OM!q(2k1{ktHI+q_sQH-;@R*ep1-;4@CD(xb+X|3smj(x3<^d`_`!vBRW!YKsNHoWJy*KolLgq
zSaC(g0fTfyVfAS98%&sV6B5|SEn@gGPAvBXBhU$LhT6{{Fxax~-Z4u{$-{%eqM9Q_
z0`4$0=3H^zmnECGJN(6?PsO96-2OgO)TxAAgLWIN%2YeHI5+1$&%;V=d?@Y^MZo*M
zHH67JXnUe<+%YAMq}gwmtayU%Y`CF$Zr$s*#=4sst@Jgq-^`Z2%QeN5O`mGtWyeNc
zkm1L|GWEh9fe}V$o}`rtx2Gc
zj_PdQ9`?S8x`jgGVf|0X#sQ|NsAxLL>eot&yIy=5o6jeUX(>9CTJQBWi3+Z}$!Suhz!vYlG5N$D59E
zFx{!PGAY}aI$_Zc4;D6Gf0KzEu}0;BNBpkbE6NxSz1y3rQp*0C2yR!kf_i{*QZ*s2
zA+32uDD9>WWk`k%hB#PxHt4iXgM$Ht&`n{J4@ciGF=S+B7GKi}BE
zFQd5xmLFh=Af2jsxi{vX@?hpz$%k9fJTx?T4e;!ixXd=uGVnd@Dv&sQ3k5Q&h32|`
zpLH){sTmb$m6?fA1F|OASjrRhI=gW+pBpYpPlYH*r_=!svai
zl?}|yTiD!(n#Q%`gQDG6F5yqpZ1>LZ2A;ZG#D(0j^vgxGU5S>Lz&3DRka6uA#4i|X
z?WK6PBgC2l7}w7Xwyz&A{-6nW`k@@JCC1gdKUS+(l7)1zmS;e3o0{^m_`oa1u%GQ}
z9k6>7(CGQUx(VDMmr_)*+x1`89lG&X{m82KWbg50D;+k!L<8k`Y3IwwHRAZ`vc7*)
zJiUCJhh-PnrjUl%{Du0VTpK3UE*d-Dys|R!P~XL^5rcz`*`&@}cowu$Z{t3@e0X2(
zj>|GmA_08={T;4C;*~yhYE&^W*(Zg1ImU
z;!R{h-xBAk1-aJOA9Dut32Uyrmp*N1b))HN&BiDKZOqIqQz?yS)q0aJ^{aMm4Y@-K
z*+hCCzl^nh0L*<@0~-$Uz&rETq%(&k2k+Ikk<6&V3fKqKReT;(<-t(fn#Q(er?=Va
zGdfN6%a%}$`nOlbOGRf~R>Yp5@FX_T>yN;Q#fn5SZ}N
zVf><&tfz$QV9z~TU%%zx!LOZd#cVG)lkJ6TeA3^9Hqjk;i9|KdBe=S7K4a&=hdunY
zZnMWJ=l92iy}#H_>dV90<#M|>%eOYqGnM}Pj;;yM3)67~<@;>esYef!H2ntJ=uH(j
zha)9>VWk2Cg>Rc$s+10A+~Q?ZA+CJm#U`OX$L+YRs8-=!(RgsHox1krs`zZC->@qa
z(Lz)afqs)w!jtsN;xzD#LgC&;I5~J0Bf|Z$^g>VDE(uy6`LQJv!x-Ikf7G2Q$+mRt
zc02LieReE;27e5aeB5*QAP3>C3p@E-R1?-rFgv#JlalJJelFhMetucHwvWXFiy=Cv
z)OHL#^BdLvz=MO-Dx9{Ga=C~@KG23;s;-!`HXvf~Vd_Okz>}-{V|n#728}n~Fv>U(+@^t#v83<7jp8RG
zG>vNP3wgDFrivYUFkzq)hk(_gy87MA548JcoL2%SyRXzWS}PSRHZh=sUMzT{G17mg
zsdVqn<|FTh`yIQCy&Lkcw#I3*zU5Nek)d(?r9F+XSYce+M)-BKC3y^K!ufso-uZWb
z`H9!ZIP`3CkgCZzT2+&=aForKUQ(;riDtGV1U$5^9_h@x*_&ke$T0b=XmQm==
z?W8NFY
ztH(wv6w<><(Oq^-BRhr~k4hWtqr>O>Zuw@jcB^EjE(l)@@zT_tN*H#6)uMAUO?}rE
z%z5GNgJc8m4=N^0q~cNyGC4)$*{X^!Cvv${$Tj@3l7fJlp~qU%
z)b#h_A$o53PNha9SX7#ag(2`Vzs1)--;0F)q&PvxR{4ET=(==D;2K7Zcy9KEj%uaG
zh%wRoV#0@%B9V}gJp6Gf0YJ%OQ%5CU$46H0Q)pW>huS_6koa=9j_z;|KWm=7fk|u>vBC4gr(J_
zc}Wf?beGJ8ZPHx95qwdfW)nX$r|AB|l?~9`wg}FeZ#f@2n{R523Aq+L4}PnhvM$9y
z^wf>$?d-%^Rf8iA&mIRq6a5CXtSU=liA}&K?%ez-wli{b(ErhN9ne(w|Nq(}JIUTf
z5)zqpk=Zb!p+SnIVI*6wk&zuz$tI$rq>^N$&_t99S!E<4t#B)lwRqt(ri5BU?
zy9|Nj%~QxU@W5nn$si}BKq1IZ>3M?4Kz{PkJD*6?>?&r>+JYR=qv6v7a1>2%PnQ4t
zdE^3dMe1MaYhAnibh&9;g1i{xg|iV@
ziSLG{4kEW|r*T(B@4#-WJ@c^{xq*x0i*UR)V(-Iiu2=s!nPiq^xHY2e*z&?2q4oU{
ztJe(O-%dGL@SY;U+7)%Hhgi`FB{PB&oO%v%w+e*5jXlJ7s1`@~|zMkEGWOZSEYgHSQ;W@UEG;8K-?ZRY__&A&&e|l;X0%rd>AO@ynl-*T(WI9)jQgy}a=P%N
zHm&(_w&$t-X}<^6#(o6OQ3}UX-pdw!#4&(#4ipF=Znj$m4(7)4{Pd*u(~lZ6$-23a
zQA0FiYLGi=dA{GUCPuEQ2U}8#N!ypG{2
zX@<1un~_F-bK{1$fKD461)QsEqxqX08J12?*gF|&TypyY-&O@z%?ShRnpU%7NdOd%
zBm(zX%M!G=w_N~*qzaq~m&RXdUdLmsX4Zc(=D%!&Ftz16kry}@rh)nZ4
zQsIu~X`(;G=T8@rWa!#m8NL$R91eL&A1gI)oaO0$+Cz`T0geEPC3F3lXFlVI%q1F?
z81~dGjVgkI&eA~=$_V)#CgO8uR0-js?9+=MK
zf_V+gvycwOh9YO;s^M!_I+)k(3Y-mpP9U4JMdoi{#u!VOL0ZA9OvBZ0cptC4I0A`I
zwn?&P43Z0LOB}59Ra