feat: PFC conversation mode, optionally enabled (experimental)

SengokuCola
2025-04-02 23:33:24 +08:00
parent 7b032ee9e8
commit 72ceb627af
15 changed files with 1537 additions and 80 deletions

.gitignore

@@ -14,6 +14,7 @@ queue_update.txt
memory_graph.gml
.env
.env.*
.cursor
config/bot_config_dev.toml
config/bot_config.toml
config/bot_config.toml.bak


@@ -144,6 +144,8 @@ class Heartflow:
添加一个SubHeartflow实例到self._subheartflows字典中
并根据subheartflow_id为子心流创建一个观察对象
"""
try:
if subheartflow_id not in self._subheartflows:
logger.debug(f"创建 subheartflow: {subheartflow_id}")
subheartflow = SubHeartflow(subheartflow_id)
@@ -161,6 +163,9 @@ class Heartflow:
self._subheartflows[subheartflow_id] = subheartflow
logger.info("添加 subheartflow 成功")
return self._subheartflows[subheartflow_id]
except Exception as e:
logger.error(f"创建 subheartflow 失败: {e}")
return None
def get_subheartflow(self, observe_chat_id):
"""获取指定ID的SubHeartflow实例"""


@@ -1,3 +0,0 @@
#Programmable Friendly Conversationalist
#Prefrontal cortex


@@ -0,0 +1,294 @@
import time
import datetime
import asyncio
from typing import Optional, Dict, Any, List
from src.common.logger import get_module_logger
from src.common.database import db
from ..message.message_base import UserInfo
from ..config.config import global_config
from ..chat.message import Message
logger = get_module_logger("chat_observer")
class ChatObserver:
"""聊天状态观察器"""
# 类级别的实例管理
_instances: Dict[str, 'ChatObserver'] = {}
@classmethod
def get_instance(cls, stream_id: str) -> 'ChatObserver':
"""获取或创建观察器实例
Args:
stream_id: 聊天流ID
Returns:
ChatObserver: 观察器实例
"""
if stream_id not in cls._instances:
cls._instances[stream_id] = cls(stream_id)
return cls._instances[stream_id]
def __init__(self, stream_id: str):
"""初始化观察器
Args:
stream_id: 聊天流ID
"""
if stream_id in self._instances:
raise RuntimeError(f"ChatObserver for {stream_id} already exists. Use get_instance() instead.")
self.stream_id = stream_id
self.last_user_speak_time: Optional[float] = None # 对方上次发言时间
self.last_bot_speak_time: Optional[float] = None # 机器人上次发言时间
self.last_check_time: float = time.time() # 上次查看聊天记录时间
self.last_message_read: Optional[str] = None # 最后读取的消息ID
self.last_message_time: Optional[float] = None # 最后一条消息的时间戳
self.waiting_start_time: Optional[float] = None # 等待开始时间
# 消息历史记录
self.message_history: List[Dict[str, Any]] = [] # 所有消息历史
self.last_message_id: Optional[str] = None # 最后一条消息的ID
self.message_count: int = 0 # 消息计数
# 运行状态
self._running: bool = False
self._task: Optional[asyncio.Task] = None
self._update_event = asyncio.Event() # 触发更新的事件
self._update_complete = asyncio.Event() # 更新完成的事件
def new_message_after(self, time_point: float) -> bool:
"""判断是否在指定时间点后有新消息
Args:
time_point: 时间戳
Returns:
bool: 是否有新消息
"""
return self.last_message_time is None or self.last_message_time > time_point
def _add_message_to_history(self, message: Dict[str, Any]):
"""添加消息到历史记录
Args:
message: 消息数据
"""
self.message_history.append(message)
self.last_message_id = message["message_id"]
self.last_message_time = message["time"] # 更新最后消息时间
self.message_count += 1
# 更新说话时间
user_info = UserInfo.from_dict(message.get("user_info", {}))
if user_info.user_id == global_config.BOT_QQ:
self.last_bot_speak_time = message["time"]
else:
self.last_user_speak_time = message["time"]
def get_message_history(
self,
start_time: Optional[float] = None,
end_time: Optional[float] = None,
limit: Optional[int] = None,
user_id: Optional[str] = None
) -> List[Dict[str, Any]]:
"""获取消息历史
Args:
start_time: 开始时间戳
end_time: 结束时间戳
limit: 限制返回消息数量
user_id: 指定用户ID
Returns:
List[Dict[str, Any]]: 消息列表
"""
filtered_messages = self.message_history
if start_time is not None:
filtered_messages = [m for m in filtered_messages if m["time"] >= start_time]
if end_time is not None:
filtered_messages = [m for m in filtered_messages if m["time"] <= end_time]
if user_id is not None:
filtered_messages = [
m for m in filtered_messages
if UserInfo.from_dict(m.get("user_info", {})).user_id == user_id
]
if limit is not None:
filtered_messages = filtered_messages[-limit:]
return filtered_messages
async def _fetch_new_messages(self) -> List[Dict[str, Any]]:
"""获取新消息
Returns:
List[Dict[str, Any]]: 新消息列表
"""
query = {"chat_id": self.stream_id}
if self.last_message_read:
# 获取ID大于last_message_read的消息
last_message = db.messages.find_one({"message_id": self.last_message_read})
if last_message:
query["time"] = {"$gt": last_message["time"]}
new_messages = list(
db.messages.find(query).sort("time", 1)
)
if new_messages:
self.last_message_read = new_messages[-1]["message_id"]
return new_messages
async def _fetch_new_messages_before(self, time_point: float) -> List[Dict[str, Any]]:
"""获取指定时间点之前的消息
Args:
time_point: 时间戳
Returns:
List[Dict[str, Any]]: 最多5条消息
"""
query = {
"chat_id": self.stream_id,
"time": {"$lt": time_point}
}
new_messages = list(
db.messages.find(query).sort("time", -1).limit(5) # 倒序获取5条
)
# 将消息按时间正序排列
new_messages.reverse()
if new_messages:
self.last_message_read = new_messages[-1]["message_id"]
return new_messages
async def _update_loop(self):
"""更新循环"""
try:
start_time = time.time()
messages = await self._fetch_new_messages_before(start_time)
for message in messages:
self._add_message_to_history(message)
except Exception as e:
logger.error(f"缓冲消息出错: {e}")
while self._running:
try:
# 等待事件或超时1秒
try:
await asyncio.wait_for(self._update_event.wait(), timeout=1)
except asyncio.TimeoutError:
pass # 超时后也执行一次检查
self._update_event.clear() # 重置触发事件
self._update_complete.clear() # 重置完成事件
# 获取新消息
new_messages = await self._fetch_new_messages()
if new_messages:
# 处理新消息
for message in new_messages:
self._add_message_to_history(message)
# 设置完成事件
self._update_complete.set()
except Exception as e:
logger.error(f"更新循环出错: {e}")
self._update_complete.set() # 即使出错也要设置完成事件
def trigger_update(self):
"""触发一次立即更新"""
self._update_event.set()
async def wait_for_update(self, timeout: float = 5.0) -> bool:
"""等待更新完成
Args:
timeout: 超时时间(秒)
Returns:
bool: 是否成功完成更新False表示超时
"""
try:
await asyncio.wait_for(self._update_complete.wait(), timeout=timeout)
return True
except asyncio.TimeoutError:
logger.warning(f"等待更新完成超时({timeout}秒)")
return False
def start(self):
"""启动观察器"""
if self._running:
return
self._running = True
self._task = asyncio.create_task(self._update_loop())
logger.info(f"ChatObserver for {self.stream_id} started")
def stop(self):
"""停止观察器"""
self._running = False
self._update_event.set() # 设置事件以解除等待
self._update_complete.set() # 设置完成事件以解除等待
if self._task:
self._task.cancel()
logger.info(f"ChatObserver for {self.stream_id} stopped")
async def process_chat_history(self, messages: list):
"""处理聊天历史
Args:
messages: 消息列表
"""
self.update_check_time()
for msg in messages:
try:
user_info = UserInfo.from_dict(msg.get("user_info", {}))
if user_info.user_id == global_config.BOT_QQ:
self.update_bot_speak_time(msg["time"])
else:
self.update_user_speak_time(msg["time"])
except Exception as e:
logger.warning(f"处理消息时间时出错: {e}")
continue
def update_check_time(self):
"""更新查看时间"""
self.last_check_time = time.time()
def update_bot_speak_time(self, speak_time: Optional[float] = None):
"""更新机器人说话时间"""
self.last_bot_speak_time = speak_time or time.time()
def update_user_speak_time(self, speak_time: Optional[float] = None):
"""更新用户说话时间"""
self.last_user_speak_time = speak_time or time.time()
def get_time_info(self) -> str:
"""获取时间信息文本"""
current_time = time.time()
time_info = ""
if self.last_bot_speak_time:
bot_speak_ago = current_time - self.last_bot_speak_time
time_info += f"\n距离你上次发言已经过去了{int(bot_speak_ago)}"
if self.last_user_speak_time:
user_speak_ago = current_time - self.last_user_speak_time
time_info += f"\n距离对方上次发言已经过去了{int(user_speak_ago)}"
return time_info
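
A minimal usage sketch of the observer above (not part of the commit). It assumes this module sits next to pfc.py as src/plugins/PFC/chat_observer.py, matching the relative import used there, and that it is called from inside a running asyncio event loop.

import asyncio
from src.plugins.PFC.chat_observer import ChatObserver

async def demo(stream_id: str):
    observer = ChatObserver.get_instance(stream_id)   # one cached instance per chat stream
    observer.start()                                   # spawn the background _update_loop task
    observer.trigger_update()                          # request an immediate fetch from the DB
    if not await observer.wait_for_update(timeout=5.0):
        print("update timed out")
    recent = observer.get_message_history(limit=20)    # filtered view of the in-memory history
    print(f"{len(recent)} messages cached")
    observer.stop()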

src/plugins/PFC/pfc.py

@@ -0,0 +1,838 @@
#Programmable Friendly Conversationalist
#Prefrontal cortex
import datetime
import asyncio
from typing import List, Optional, Dict, Any, Tuple, Literal
from enum import Enum
from src.common.database import db
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
from ..chat.chat_stream import ChatStream
from ..message.message_base import UserInfo, Seg
from ..chat.message import Message
from ..models.utils_model import LLM_request
from ..config.config import global_config
from src.plugins.chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
from src.plugins.chat.message_sender import message_manager
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.willing.willing_manager import willing_manager
from ..message.api import global_api
from ..storage.storage import MessageStorage
from .chat_observer import ChatObserver
from .pfc_KnowledgeFetcher import KnowledgeFetcher
from .reply_checker import ReplyChecker
import json
import time
logger = get_module_logger("pfc")
class ConversationState(Enum):
"""对话状态"""
INIT = "初始化"
RETHINKING = "重新思考"
ANALYZING = "分析历史"
PLANNING = "规划目标"
GENERATING = "生成回复"
CHECKING = "检查回复"
SENDING = "发送消息"
WAITING = "等待"
LISTENING = "倾听"
ENDED = "结束"
JUDGING = "判断"
ActionType = Literal["direct_reply", "fetch_knowledge", "wait"]
class ActionPlanner:
"""行动规划器"""
def __init__(self, stream_id: str):
self.llm = LLM_request(
model=global_config.llm_normal,
temperature=0.7,
max_tokens=1000,
request_type="action_planning"
)
self.personality_info = " ".join(global_config.PROMPT_PERSONALITY)
self.name = global_config.BOT_NICKNAME
self.chat_observer = ChatObserver.get_instance(stream_id)
async def plan(
self,
goal: str,
method: str,
reasoning: str,
action_history: List[Dict[str, str]] = None,
chat_observer: Optional[ChatObserver] = None, # 添加chat_observer参数
) -> Tuple[str, str]:
"""规划下一步行动
Args:
goal: 对话目标
method: 实现方式
reasoning: 目标原因
action_history: 行动历史记录
Returns:
Tuple[str, str]: (行动类型, 行动原因)
"""
# 构建提示词
# 获取最近20条消息
self.chat_observer.waiting_start_time = time.time()
messages = self.chat_observer.get_message_history(limit=20)
chat_history_text = ""
for msg in messages:
time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
user_info = UserInfo.from_dict(msg.get("user_info", {}))
sender = user_info.user_nickname or f"用户{user_info.user_id}"
if sender == self.name:
sender = "你说"
chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
personality_text = f"你的名字是{self.name}{self.personality_info}"
# 构建action历史文本
action_history_text = ""
if action_history:
if action_history[-1]['action'] == "direct_reply":
action_history_text = "你刚刚发言回复了对方"
# 获取时间信息
time_info = self.chat_observer.get_time_info()
prompt = f"""现在你在参与一场QQ聊天请分析以下内容根据信息决定下一步行动
{personality_text}
当前对话目标:{goal}
实现该对话目标的方式:{method}
产生该对话目标的原因:{reasoning}
{time_info}
最近的对话记录:
{chat_history_text}
{action_history_text}
请你接下去想想要你要做什么,可以发言,可以等待,可以倾听,可以调取知识。注意不同行动类型的要求,不要重复发言:
行动类型:
fetch_knowledge: 需要调取知识,当需要专业知识或特定信息时选择
wait: 当你做出了发言,对方尚未回复时等待对方的回复
listening: 倾听对方发言,当你认为对方发言尚未结束时采用
direct_reply: 不符合上述情况,回复对方,注意不要过多或者重复发言
rethink_goal: 重新思考对话目标,当发现对话目标不合适时选择,会重新思考对话目标
judge_conversation: 判断对话是否结束,当发现对话目标已经达到或者希望停止对话时选择,会判断对话是否结束
请以JSON格式输出包含以下字段
1. action: 行动类型,注意你之前的行为
2. reason: 选择该行动的原因,注意你之前的行为(简要解释)
注意请严格按照JSON格式输出不要包含任何其他内容。"""
logger.debug(f"发送到LLM的提示词: {prompt}")
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"LLM原始返回内容: {content}")
# 清理内容尝试提取JSON部分
content = content.strip()
try:
# 尝试直接解析
result = json.loads(content)
except json.JSONDecodeError:
# 如果直接解析失败尝试查找和提取JSON部分
import re
json_pattern = r'\{[^{}]*\}'
json_match = re.search(json_pattern, content)
if json_match:
try:
result = json.loads(json_match.group())
except json.JSONDecodeError:
logger.error("提取的JSON内容解析失败返回默认行动")
return "direct_reply", "JSON解析失败选择直接回复"
else:
# 如果找不到JSON尝试从文本中提取行动和原因
if "direct_reply" in content.lower():
return "direct_reply", "从文本中提取的行动"
elif "fetch_knowledge" in content.lower():
return "fetch_knowledge", "从文本中提取的行动"
elif "wait" in content.lower():
return "wait", "从文本中提取的行动"
elif "listening" in content.lower():
return "listening", "从文本中提取的行动"
elif "rethink_goal" in content.lower():
return "rethink_goal", "从文本中提取的行动"
elif "judge_conversation" in content.lower():
return "judge_conversation", "从文本中提取的行动"
else:
logger.error("无法从返回内容中提取行动类型")
return "direct_reply", "无法解析响应,选择直接回复"
# 验证JSON字段
action = result.get("action", "direct_reply")
reason = result.get("reason", "默认原因")
# 验证action类型
if action not in ["direct_reply", "fetch_knowledge", "wait", "listening", "rethink_goal", "judge_conversation"]:
logger.warning(f"未知的行动类型: {action}默认使用listening")
action = "listening"
logger.info(f"规划的行动: {action}")
logger.info(f"行动原因: {reason}")
return action, reason
except Exception as e:
logger.error(f"规划行动时出错: {str(e)}")
return "direct_reply", "发生错误,选择直接回复"
class GoalAnalyzer:
"""对话目标分析器"""
def __init__(self, stream_id: str):
self.llm = LLM_request(
model=global_config.llm_normal,
temperature=0.7,
max_tokens=1000,
request_type="conversation_goal"
)
self.personality_info = " ".join(global_config.PROMPT_PERSONALITY)
self.name = global_config.BOT_NICKNAME
self.nick_name = global_config.BOT_ALIAS_NAMES
self.chat_observer = ChatObserver.get_instance(stream_id)
async def analyze_goal(self) -> Tuple[str, str, str]:
"""分析对话历史并设定目标
Args:
chat_history: 聊天历史记录列表
Returns:
Tuple[str, str, str]: (目标, 方法, 原因)
"""
max_retries = 3
for retry in range(max_retries):
try:
# 构建提示词
messages = self.chat_observer.get_message_history(limit=20)
chat_history_text = ""
for msg in messages:
time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
user_info = UserInfo.from_dict(msg.get("user_info", {}))
sender = user_info.user_nickname or f"用户{user_info.user_id}"
if sender == self.name:
sender = "你说"
chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
personality_text = f"你的名字是{self.name}{self.personality_info}"
prompt = f"""{personality_text}。现在你在参与一场QQ聊天请分析以下聊天记录并根据你的性格特征确定一个明确的对话目标。
这个目标应该反映出对话的意图和期望的结果。
聊天记录:
{chat_history_text}
请以JSON格式输出包含以下字段
1. goal: 对话目标(简短的一句话)
2. reasoning: 对话原因,为什么设定这个目标(简要解释)
输出格式示例:
{{
"goal": "回答用户关于Python编程的具体问题",
"reasoning": "用户提出了关于Python的技术问题需要专业且准确的解答"
}}"""
logger.debug(f"发送到LLM的提示词: {prompt}")
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"LLM原始返回内容: {content}")
# 清理和验证返回内容
if not content or not isinstance(content, str):
logger.error("LLM返回内容为空或格式不正确")
continue
# 尝试提取JSON部分
content = content.strip()
try:
# 尝试直接解析
result = json.loads(content)
except json.JSONDecodeError:
# 如果直接解析失败尝试查找和提取JSON部分
import re
json_pattern = r'\{[^{}]*\}'
json_match = re.search(json_pattern, content)
if json_match:
try:
result = json.loads(json_match.group())
except json.JSONDecodeError:
logger.error(f"提取的JSON内容解析失败重试第{retry + 1}")
continue
else:
logger.error(f"无法在返回内容中找到有效的JSON重试第{retry + 1}")
continue
# 验证JSON字段
if not all(key in result for key in ["goal", "reasoning"]):
logger.error(f"JSON缺少必要字段实际内容: {result},重试第{retry + 1}")
continue
goal = result["goal"]
reasoning = result["reasoning"]
# 验证字段内容
if not isinstance(goal, str) or not isinstance(reasoning, str):
logger.error(f"JSON字段类型错误goal和reasoning必须是字符串重试第{retry + 1}")
continue
if not goal.strip() or not reasoning.strip():
logger.error(f"JSON字段内容为空重试第{retry + 1}")
continue
# 使用默认的方法
method = "以友好的态度回应"
return goal, method, reasoning
except Exception as e:
logger.error(f"分析对话目标时出错: {str(e)},重试第{retry + 1}")
if retry == max_retries - 1:
return "保持友好的对话", "以友好的态度回应", "确保对话顺利进行"
continue
# 所有重试都失败后的默认返回
return "保持友好的对话", "以友好的态度回应", "确保对话顺利进行"
async def analyze_conversation(self,goal,reasoning):
messages = self.chat_observer.get_message_history()
chat_history_text = ""
for msg in messages:
time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
user_info = UserInfo.from_dict(msg.get("user_info", {}))
sender = user_info.user_nickname or f"用户{user_info.user_id}"
if sender == self.name:
sender = "你说"
chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
personality_text = f"你的名字是{self.name}{self.personality_info}"
prompt = f"""{personality_text}。现在你在参与一场QQ聊天
当前对话目标:{goal}
产生该对话目标的原因:{reasoning}
请分析以下聊天记录,并根据你的性格特征评估该目标是否已经达到,或者你是否希望停止该次对话。
聊天记录:
{chat_history_text}
请以JSON格式输出包含以下字段
1. goal_achieved: 对话目标是否已经达到true/false
2. stop_conversation: 是否希望停止该次对话true/false
3. reason: 为什么希望停止该次对话(简要解释)
输出格式示例:
{{
"goal_achieved": true,
"stop_conversation": false,
"reason": "用户已经得到了满意的回答,但我仍希望继续聊天"
}}"""
logger.debug(f"发送到LLM的提示词: {prompt}")
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"LLM原始返回内容: {content}")
# 清理和验证返回内容
if not content or not isinstance(content, str):
logger.error("LLM返回内容为空或格式不正确")
return False, False, "确保对话顺利进行"
# 尝试提取JSON部分
content = content.strip()
try:
# 尝试直接解析
result = json.loads(content)
except json.JSONDecodeError:
# 如果直接解析失败尝试查找和提取JSON部分
import re
json_pattern = r'\{[^{}]*\}'
json_match = re.search(json_pattern, content)
if json_match:
try:
result = json.loads(json_match.group())
except json.JSONDecodeError as e:
logger.error(f"提取的JSON内容解析失败: {e}")
return False, False, "确保对话顺利进行"
else:
logger.error("无法在返回内容中找到有效的JSON")
return False, False, "确保对话顺利进行"
# 验证JSON字段
if not all(key in result for key in ["goal_achieved", "stop_conversation", "reason"]):
logger.error(f"JSON缺少必要字段实际内容: {result}")
return False, False, "确保对话顺利进行"
goal_achieved = result["goal_achieved"]
stop_conversation = result["stop_conversation"]
reason = result["reason"]
# 验证字段类型
if not isinstance(goal_achieved, bool):
logger.error("goal_achieved 必须是布尔值")
return False, False, "确保对话顺利进行"
if not isinstance(stop_conversation, bool):
logger.error("stop_conversation 必须是布尔值")
return False, False, "确保对话顺利进行"
if not isinstance(reason, str):
logger.error("reason 必须是字符串")
return False, False, "确保对话顺利进行"
if not reason.strip():
logger.error("reason 不能为空")
return False, False, "确保对话顺利进行"
return goal_achieved, stop_conversation, reason
except Exception as e:
logger.error(f"分析对话目标时出错: {str(e)}")
return False, False, "确保对话顺利进行"
class Waiter:
"""快 速 等 待"""
def __init__(self, stream_id: str):
self.chat_observer = ChatObserver.get_instance(stream_id)
self.personality_info = " ".join(global_config.PROMPT_PERSONALITY)
self.name = global_config.BOT_NICKNAME
async def wait(self) -> bool:
"""等待
Returns:
bool: 是否超时True表示超时
"""
wait_start_time = self.chat_observer.waiting_start_time
while not self.chat_observer.new_message_after(wait_start_time):
await asyncio.sleep(1)
logger.info("等待中...")
# 检查是否超过60秒
if time.time() - wait_start_time > 60:
logger.info("等待超过60秒结束对话")
return True
logger.info("等待结束")
return False
class ReplyGenerator:
"""回复生成器"""
def __init__(self, stream_id: str):
self.llm = LLM_request(
model=global_config.llm_normal,
temperature=0.7,
max_tokens=300,
request_type="reply_generation"
)
self.personality_info = " ".join(global_config.PROMPT_PERSONALITY)
self.name = global_config.BOT_NICKNAME
self.chat_observer = ChatObserver.get_instance(stream_id)
self.reply_checker = ReplyChecker(stream_id)
async def generate(
self,
goal: str,
chat_history: List[Message],
knowledge_cache: Dict[str, str],
previous_reply: Optional[str] = None,
retry_count: int = 0
) -> Tuple[str, bool]:
"""生成回复
Args:
goal: 对话目标
method: 实现方式
chat_history: 聊天历史
knowledge_cache: 知识缓存
previous_reply: 上一次生成的回复(如果有)
retry_count: 当前重试次数
Returns:
Tuple[str, bool]: (生成的回复, 是否需要重新规划)
"""
# 构建提示词
logger.debug(f"开始生成回复:当前目标: {goal}")
self.chat_observer.trigger_update() # 触发立即更新
if not await self.chat_observer.wait_for_update():
logger.warning("等待消息更新超时")
messages = self.chat_observer.get_message_history(limit=20)
chat_history_text = ""
for msg in messages:
time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
user_info = UserInfo.from_dict(msg.get("user_info", {}))
sender = user_info.user_nickname or f"用户{user_info.user_id}"
if sender == self.name:
sender = "你说"
chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
# 整理知识缓存
knowledge_text = ""
if knowledge_cache:
knowledge_text = "\n相关知识:"
if isinstance(knowledge_cache, dict):
for source, content in knowledge_cache.items():
knowledge_text += f"\n{content}"
elif isinstance(knowledge_cache, list):
for item in knowledge_cache:
knowledge_text += f"\n{item}"
# 添加上一次生成的回复信息
previous_reply_text = ""
if previous_reply:
previous_reply_text = f"\n上一次生成的回复(需要改进):\n{previous_reply}"
personality_text = f"你的名字是{self.name}{self.personality_info}"
prompt = f"""{personality_text}。现在你在参与一场QQ聊天请根据以下信息生成回复
当前对话目标:{goal}
{knowledge_text}
{previous_reply_text}
最近的聊天记录:
{chat_history_text}
请根据上述信息,以你的性格特征生成一个自然、得体的回复。回复应该:
1. 符合对话目标,以""的角度发言
2. 体现你的性格特征
3. 自然流畅,像正常聊天一样,简短
4. 适当利用相关知识,但不要生硬引用
{f'5. 改进上一次回复中的问题' if previous_reply else ''}
请注意把握聊天内容,不要回复的太有条理,可以有个性。请分清""和对方说的话,不要把""说的话当做对方说的话,这是你自己说的话。
请你回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
请你注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。
请直接输出回复内容,不需要任何额外格式。"""
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.info(f"生成的回复: {content}")
# 检查生成的回复是否合适
is_suitable, reason, need_replan = await self.reply_checker.check(
content, goal, retry_count
)
if not is_suitable:
logger.warning(f"生成的回复不合适,原因: {reason}")
if need_replan:
logger.info("需要重新规划对话目标")
return "让我重新思考一下...", True
else:
# 递归调用将当前回复作为previous_reply传入
return await self.generate(
goal, chat_history, knowledge_cache,
content, retry_count + 1
)
return content, False
except Exception as e:
logger.error(f"生成回复时出错: {e}")
return "抱歉,我现在有点混乱,让我重新思考一下...", True
class Conversation:
# 类级别的实例管理
_instances: Dict[str, 'Conversation'] = {}
@classmethod
def get_instance(cls, stream_id: str) -> 'Conversation':
"""获取或创建对话实例"""
if stream_id not in cls._instances:
cls._instances[stream_id] = cls(stream_id)
logger.info(f"创建新的对话实例: {stream_id}")
return cls._instances[stream_id]
@classmethod
def remove_instance(cls, stream_id: str):
"""删除对话实例"""
if stream_id in cls._instances:
# 停止相关组件
instance = cls._instances[stream_id]
instance.chat_observer.stop()
# 删除实例
del cls._instances[stream_id]
logger.info(f"已删除对话实例 {stream_id}")
def __init__(self, stream_id: str):
"""初始化对话系统"""
self.stream_id = stream_id
self.state = ConversationState.INIT
self.current_goal: Optional[str] = None
self.current_method: Optional[str] = None
self.goal_reasoning: Optional[str] = None
self.generated_reply: Optional[str] = None
self.should_continue = True
# 初始化聊天观察器
self.chat_observer = ChatObserver.get_instance(stream_id)
# 添加action历史记录
self.action_history: List[Dict[str, str]] = []
# 知识缓存
self.knowledge_cache: Dict[str, str] = {} # 确保初始化为字典
# 初始化各个组件
self.goal_analyzer = GoalAnalyzer(self.stream_id)
self.action_planner = ActionPlanner(self.stream_id)
self.reply_generator = ReplyGenerator(self.stream_id)
self.knowledge_fetcher = KnowledgeFetcher()
self.direct_sender = DirectMessageSender()
self.waiter = Waiter(self.stream_id)
# 创建聊天流
self.chat_stream = chat_manager.get_stream(self.stream_id)
def _clear_knowledge_cache(self):
"""清空知识缓存"""
self.knowledge_cache.clear() # 使用clear方法清空字典
async def start(self):
"""开始对话流程"""
logger.info("对话系统启动")
self.should_continue = True
self.chat_observer.start() # 启动观察器
await asyncio.sleep(1)
# 启动对话循环
await self._conversation_loop()
async def _conversation_loop(self):
"""对话循环"""
# 获取最近的消息历史
self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
while self.should_continue:
# 执行行动
self.chat_observer.trigger_update() # 触发立即更新
if not await self.chat_observer.wait_for_update():
logger.warning("等待消息更新超时")
action, reason = await self.action_planner.plan(
self.current_goal,
self.current_method,
self.goal_reasoning,
self.action_history, # 传入action历史
self.chat_observer # 传入chat_observer
)
# 执行行动
await self._handle_action(action, reason)
def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
"""将消息字典转换为Message对象"""
try:
chat_info = msg_dict.get("chat_info", {})
chat_stream = ChatStream.from_dict(chat_info)
user_info = UserInfo.from_dict(msg_dict.get("user_info", {}))
return Message(
message_id=msg_dict["message_id"],
chat_stream=chat_stream,
time=msg_dict["time"],
user_info=user_info,
processed_plain_text=msg_dict.get("processed_plain_text", ""),
detailed_plain_text=msg_dict.get("detailed_plain_text", "")
)
except Exception as e:
logger.warning(f"转换消息时出错: {e}")
raise
async def _handle_action(self, action: str, reason: str):
"""处理规划的行动"""
logger.info(f"执行行动: {action}, 原因: {reason}")
# 记录action历史
self.action_history.append({
"action": action,
"reason": reason,
"time": datetime.datetime.now().strftime("%H:%M:%S")
})
# 只保留最近的10条记录
if len(self.action_history) > 10:
self.action_history = self.action_history[-10:]
if action == "direct_reply":
self.state = ConversationState.GENERATING
messages = self.chat_observer.get_message_history(limit=30)
self.generated_reply, need_replan = await self.reply_generator.generate(
self.current_goal,
self.current_method,
[self._convert_to_message(msg) for msg in messages],
self.knowledge_cache
)
if need_replan:
self.state = ConversationState.RETHINKING
self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
else:
await self._send_reply()
elif action == "fetch_knowledge":
self.state = ConversationState.GENERATING
messages = self.chat_observer.get_message_history(limit=30)
knowledge, sources = await self.knowledge_fetcher.fetch(
self.current_goal,
[self._convert_to_message(msg) for msg in messages]
)
logger.info(f"获取到知识,来源: {sources}")
if knowledge != "未找到相关知识":
self.knowledge_cache[sources] = knowledge
self.generated_reply, need_replan = await self.reply_generator.generate(
self.current_goal,
self.current_method,
[self._convert_to_message(msg) for msg in messages],
self.knowledge_cache
)
if need_replan:
self.state = ConversationState.RETHINKING
self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
else:
await self._send_reply()
elif action == "rethink_goal":
self.state = ConversationState.RETHINKING
self.current_goal, self.current_method, self.goal_reasoning = await self.goal_analyzer.analyze_goal()
elif action == "judge_conversation":
self.state = ConversationState.JUDGING
self.goal_achieved, self.stop_conversation, self.reason = await self.goal_analyzer.analyze_conversation(self.current_goal, self.goal_reasoning)
if self.stop_conversation:
await self._stop_conversation()
elif action == "listening":
self.state = ConversationState.LISTENING
logger.info("倾听对方发言...")
if await self.waiter.wait(): # 如果返回True表示超时
await self._send_timeout_message()
await self._stop_conversation()
else: # wait
self.state = ConversationState.WAITING
logger.info("等待更多信息...")
if await self.waiter.wait(): # 如果返回True表示超时
await self._send_timeout_message()
await self._stop_conversation()
async def _stop_conversation(self):
"""完全停止对话"""
logger.info("停止对话")
self.should_continue = False
self.state = ConversationState.ENDED
# 删除实例这会同时停止chat_observer
self.remove_instance(self.stream_id)
async def _send_timeout_message(self):
"""发送超时结束消息"""
try:
messages = self.chat_observer.get_message_history(limit=1)
if not messages:
return
latest_message = self._convert_to_message(messages[0])
await self.direct_sender.send_message(
chat_stream=self.chat_stream,
content="抱歉,由于等待时间过长,我需要先去忙别的了。下次再聊吧~",
reply_to_message=latest_message
)
except Exception as e:
logger.error(f"发送超时消息失败: {str(e)}")
async def _send_reply(self):
"""发送回复"""
if not self.generated_reply:
logger.warning("没有生成回复")
return
messages = self.chat_observer.get_message_history(limit=1)
if not messages:
logger.warning("没有最近的消息可以回复")
return
latest_message = self._convert_to_message(messages[0])
try:
await self.direct_sender.send_message(
chat_stream=self.chat_stream,
content=self.generated_reply,
reply_to_message=latest_message
)
self.chat_observer.trigger_update() # 触发立即更新
if not await self.chat_observer.wait_for_update():
logger.warning("等待消息更新超时")
self.state = ConversationState.ANALYZING
except Exception as e:
logger.error(f"发送消息失败: {str(e)}")
self.state = ConversationState.ANALYZING
class DirectMessageSender:
"""直接发送消息到平台的发送器"""
def __init__(self):
self.logger = get_module_logger("direct_sender")
self.storage = MessageStorage()
async def send_message(
self,
chat_stream: ChatStream,
content: str,
reply_to_message: Optional[Message] = None,
) -> None:
"""直接发送消息到平台
Args:
chat_stream: 聊天流
content: 消息内容
reply_to_message: 要回复的消息
"""
# 构建消息对象
message_segment = Seg(type="text", data=content)
bot_user_info = UserInfo(
user_id=global_config.BOT_QQ,
user_nickname=global_config.BOT_NICKNAME,
platform=chat_stream.platform,
)
message = MessageSending(
message_id=f"dm{round(time.time(), 2)}",
chat_stream=chat_stream,
bot_user_info=bot_user_info,
sender_info=reply_to_message.message_info.user_info if reply_to_message else None,
message_segment=message_segment,
reply=reply_to_message,
is_head=True,
is_emoji=False,
thinking_start_time=time.time(),
)
# 处理消息
await message.process()
# 发送消息
try:
message_json = message.to_dict()
end_point = global_config.api_urls.get(chat_stream.platform, None)
if not end_point:
raise ValueError(f"未找到平台:{chat_stream.platform} 的url配置")
await global_api.send_message(end_point, message_json)
# 存储消息
await self.storage.store_message(message, message.chat_stream)
self.logger.info(f"直接发送消息成功: {content[:30]}...")
except Exception as e:
self.logger.error(f"直接发送消息失败: {str(e)}")
raise
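
For orientation (not part of the commit): the ChatBot changes further down in this diff drive the Conversation class roughly as sketched here, creating one instance per private chat stream and running its planning loop as a background task.

import asyncio
from src.plugins.PFC.pfc import Conversation, ConversationState

def start_pfc_for_stream(stream_id: str) -> None:
    # must be called from inside a running event loop
    conversation = Conversation.get_instance(stream_id)   # one instance per chat stream
    if conversation.state == ConversationState.INIT:       # only start freshly created instances
        asyncio.create_task(conversation.start())          # goal analysis -> action planning loop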


@@ -0,0 +1,54 @@
from typing import List, Tuple
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
from ..models.utils_model import LLM_request
from ..config.config import global_config
from ..chat.message import Message
logger = get_module_logger("knowledge_fetcher")
class KnowledgeFetcher:
"""知识调取器"""
def __init__(self):
self.llm = LLM_request(
model=global_config.llm_normal,
temperature=0.7,
max_tokens=1000,
request_type="knowledge_fetch"
)
async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
"""获取相关知识
Args:
query: 查询内容
chat_history: 聊天历史
Returns:
Tuple[str, str]: (获取的知识, 知识来源)
"""
# 构建查询上下文
chat_history_text = ""
for msg in chat_history:
# sender = msg.message_info.user_info.user_nickname or f"用户{msg.message_info.user_info.user_id}"
chat_history_text += f"{msg.detailed_plain_text}\n"
# 从记忆中获取相关知识
related_memory = await HippocampusManager.get_instance().get_memory_from_text(
text=f"{query}\n{chat_history_text}",
max_memory_num=3,
max_memory_length=2,
max_depth=3,
fast_retrieval=False
)
if related_memory:
knowledge = ""
sources = []
for memory in related_memory:
knowledge += memory[1] + "\n"
sources.append(f"记忆片段{memory[0]}")
return knowledge.strip(), "".join(sources)
return "未找到相关知识", "无记忆匹配"


@@ -0,0 +1,141 @@
import json
import datetime
from typing import Tuple, Dict, Any, List
from src.common.logger import get_module_logger
from ..models.utils_model import LLM_request
from ..config.config import global_config
from .chat_observer import ChatObserver
from ..message.message_base import UserInfo
logger = get_module_logger("reply_checker")
class ReplyChecker:
"""回复检查器"""
def __init__(self, stream_id: str):
self.llm = LLM_request(
model=global_config.llm_normal,
temperature=0.7,
max_tokens=1000,
request_type="reply_check"
)
self.name = global_config.BOT_NICKNAME
self.chat_observer = ChatObserver.get_instance(stream_id)
self.max_retries = 2 # 最大重试次数
async def check(
self,
reply: str,
goal: str,
retry_count: int = 0
) -> Tuple[bool, str, bool]:
"""检查生成的回复是否合适
Args:
reply: 生成的回复
goal: 对话目标
retry_count: 当前重试次数
Returns:
Tuple[bool, str, bool]: (是否合适, 原因, 是否需要重新规划)
"""
# 获取最新的消息记录
messages = self.chat_observer.get_message_history(limit=5)
chat_history_text = ""
for msg in messages:
time_str = datetime.datetime.fromtimestamp(msg["time"]).strftime("%H:%M:%S")
user_info = UserInfo.from_dict(msg.get("user_info", {}))
sender = user_info.user_nickname or f"用户{user_info.user_id}"
if sender == self.name:
sender = "你说"
chat_history_text += f"{time_str},{sender}:{msg.get('processed_plain_text', '')}\n"
prompt = f"""请检查以下回复是否合适:
当前对话目标:{goal}
最新的对话记录:
{chat_history_text}
待检查的回复:
{reply}
请检查以下几点:
1. 回复是否依然符合当前对话目标和实现方式
2. 回复是否与最新的对话记录保持一致性
3. 回复是否重复发言,重复表达
4. 回复是否包含违法违规内容(政治敏感、暴力等)
5. 回复是否以你的角度发言,不要把""说的话当做对方说的话,这是你自己说的话
请以JSON格式输出包含以下字段
1. suitable: 是否合适 (true/false)
2. reason: 原因说明
3. need_replan: 是否需要重新规划对话目标 (true/false)当发现当前对话目标不再适合时设为true
输出格式示例:
{{
"suitable": true,
"reason": "回复符合要求,内容得体",
"need_replan": false
}}
注意请严格按照JSON格式输出不要包含任何其他内容。"""
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"检查回复的原始返回: {content}")
# 清理内容尝试提取JSON部分
content = content.strip()
try:
# 尝试直接解析
result = json.loads(content)
except json.JSONDecodeError:
# 如果直接解析失败尝试查找和提取JSON部分
import re
json_pattern = r'\{[^{}]*\}'
json_match = re.search(json_pattern, content)
if json_match:
try:
result = json.loads(json_match.group())
except json.JSONDecodeError:
# 如果JSON解析失败尝试从文本中提取结果
is_suitable = "不合适" not in content.lower() and "违规" not in content.lower()
reason = content[:100] if content else "无法解析响应"
need_replan = "重新规划" in content.lower() or "目标不适合" in content.lower()
return is_suitable, reason, need_replan
else:
# 如果找不到JSON从文本中判断
is_suitable = "不合适" not in content.lower() and "违规" not in content.lower()
reason = content[:100] if content else "无法解析响应"
need_replan = "重新规划" in content.lower() or "目标不适合" in content.lower()
return is_suitable, reason, need_replan
# 验证JSON字段
suitable = result.get("suitable", None)
reason = result.get("reason", "未提供原因")
need_replan = result.get("need_replan", False)
# 如果suitable字段是字符串转换为布尔值
if isinstance(suitable, str):
suitable = suitable.lower() == "true"
# 如果suitable字段不存在或不是布尔值从reason中判断
if suitable is None:
suitable = "不合适" not in reason.lower() and "违规" not in reason.lower()
# 如果不合适且未达到最大重试次数,返回需要重试
if not suitable and retry_count < self.max_retries:
return False, reason, False
# 如果不合适且已达到最大重试次数,返回需要重新规划
if not suitable and retry_count >= self.max_retries:
return False, f"多次重试后仍不合适: {reason}", True
return suitable, reason, need_replan
except Exception as e:
logger.error(f"检查回复时出错: {e}")
# 如果出错且已达到最大重试次数,建议重新规划
if retry_count >= self.max_retries:
return False, f"多次检查失败,建议重新规划", True
return False, f"检查过程出错,建议重试: {str(e)}", False


@@ -1,14 +1,17 @@
from typing import Dict
from ..moods.moods import MoodManager # 导入情绪管理器
from ..config.config import global_config
from ..chat_module.reasoning_chat.reasoning_generator import ResponseGenerator
from .message import MessageRecv
from ..storage.storage import MessageStorage # 修改导入路径
from ..PFC.pfc import Conversation, ConversationState
from .chat_stream import chat_manager
from ..chat_module.only_process.only_message_process import MessageProcessor
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ..chat_module.think_flow_chat.think_flow_chat import ThinkFlowChat
from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat
import asyncio
# 定义日志配置
chat_config = LogConfig(
@@ -23,20 +26,33 @@ logger = get_module_logger("chat_bot", config=chat_config)
class ChatBot:
def __init__(self):
self.storage = MessageStorage()
self.gpt = ResponseGenerator()
self.bot = None # bot 实例引用
self._started = False
self.mood_manager = MoodManager.get_instance() # 获取情绪管理器单例
self.mood_manager.start_mood_update() # 启动情绪更新
self.think_flow_chat = ThinkFlowChat()
self.reasoning_chat = ReasoningChat()
self.only_process_chat = MessageProcessor()
async def _ensure_started(self):
"""确保所有任务已启动"""
if not self._started:
self._started = True
async def _create_PFC_chat(self, message: MessageRecv):
try:
chat_id = str(message.chat_stream.stream_id)
if global_config.enable_pfc_chatting:
# 获取或创建对话实例
conversation = Conversation.get_instance(chat_id)
# 如果是新创建的实例,启动对话系统
if conversation.state == ConversationState.INIT:
asyncio.create_task(conversation.start())
logger.info(f"为聊天 {chat_id} 创建新的对话实例")
except Exception as e:
logger.error(f"创建PFC聊天流失败: {e}")
async def message_process(self, message_data: str) -> None:
"""处理转化后的统一格式消息
根据global_config.response_mode选择不同的回复模式
@@ -50,7 +66,11 @@ class ChatBot:
- 没有思维流相关的状态管理
- 更简单直接的回复逻辑
两种模式都包含:
3. pfc_chatting模式仅进行消息处理
- 不进行任何回复
- 只处理和存储消息
所有模式都包含:
- 消息过滤
- 记忆激活
- 意愿计算
@@ -59,6 +79,45 @@
- 性能计时
"""
message = MessageRecv(message_data)
groupinfo = message.message_info.group_info
if global_config.enable_pfc_chatting:
try:
if groupinfo is None and global_config.enable_friend_chat:
userinfo = message.message_info.user_info
messageinfo = message.message_info
# 创建聊天流
chat = await chat_manager.get_or_create_stream(
platform=messageinfo.platform,
user_info=userinfo,
group_info=groupinfo,
)
message.update_chat_stream(chat)
await self.only_process_chat.process_message(message)
await self._create_PFC_chat(message)
else:
if groupinfo.group_id in global_config.talk_allowed_groups:
if global_config.response_mode == "heart_flow":
await self.think_flow_chat.process_message(message_data)
elif global_config.response_mode == "reasoning":
await self.reasoning_chat.process_message(message_data)
else:
logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
except Exception as e:
logger.error(f"处理PFC消息失败: {e}")
else:
if groupinfo is None and global_config.enable_friend_chat:
# 私聊处理流程
# await self._handle_private_chat(message)
if global_config.response_mode == "heart_flow":
await self.think_flow_chat.process_message(message_data)
elif global_config.response_mode == "reasoning":
await self.reasoning_chat.process_message(message_data)
else:
logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
else: # 群聊处理
if groupinfo.group_id in global_config.talk_allowed_groups:
if global_config.response_mode == "heart_flow":
await self.think_flow_chat.process_message(message_data)
elif global_config.response_mode == "reasoning":


@@ -137,6 +137,7 @@ class ChatManager:
ChatStream: 聊天流对象
"""
# 生成stream_id
try:
stream_id = self._generate_stream_id(platform, user_info, group_info)
# 检查内存中是否存在
@@ -167,6 +168,9 @@
user_info=user_info,
group_info=group_info,
)
except Exception as e:
logger.error(f"创建聊天流失败: {e}")
raise e
# 保存到内存和数据库
self.streams[stream_id] = stream


@@ -166,7 +166,7 @@ class ImageManager:
# 查询缓存的描述
cached_description = self._get_description_from_db(image_hash, "image")
if cached_description:
logger.info(f"图片描述缓存中 {cached_description}")
logger.debug(f"图片描述缓存中 {cached_description}")
return f"[图片:{cached_description}]"
# 调用AI获取描述


@@ -0,0 +1,69 @@
from typing import Optional
from src.common.logger import get_module_logger
from src.plugins.chat.message import MessageRecv
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.storage.storage import MessageStorage
from src.plugins.config.config import global_config
import re
import asyncio
from datetime import datetime
logger = get_module_logger("pfc_message_processor")
class MessageProcessor:
"""消息处理器,负责处理接收到的消息并存储"""
def __init__(self):
self.storage = MessageStorage()
def _check_ban_words(self, text: str, chat, userinfo) -> bool:
"""检查消息中是否包含过滤词"""
for word in global_config.ban_words:
if word in text:
logger.info(
f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
)
logger.info(f"[过滤词识别]消息中含有{word}filtered")
return True
return False
def _check_ban_regex(self, text: str, chat, userinfo) -> bool:
"""检查消息是否匹配过滤正则表达式"""
for pattern in global_config.ban_msgs_regex:
if re.search(pattern, text):
logger.info(
f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
)
logger.info(f"[正则表达式过滤]消息匹配到{pattern}filtered")
return True
return False
async def process_message(self, message: MessageRecv) -> None:
"""处理消息并存储
Args:
message: 消息对象
"""
userinfo = message.message_info.user_info
chat = message.chat_stream
# 处理消息
await message.process()
# 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
message.raw_message, chat, userinfo
):
return
# 存储消息
await self.storage.store_message(message, chat)
# 打印消息信息
mes_name = chat.group_info.group_name if chat.group_info else "私聊"
# 将时间戳转换为datetime对象
current_time = datetime.fromtimestamp(message.message_info.time).strftime("%H:%M:%S")
logger.info(
f"[{current_time}][{mes_name}]"
f"{chat.user_info.user_nickname}: {message.processed_plain_text}"
)
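
Sketch of how this processor is driven (the ChatBot changes earlier in this diff call it for private messages before handing control to PFC); the import path is inferred from the relative import in the bot module.

from src.plugins.chat_module.only_process.only_message_process import MessageProcessor

async def store_only(message) -> None:            # message: MessageRecv with chat_stream attached
    processor = MessageProcessor()
    await processor.process_message(message)      # filter ban words/regex, then store the message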


@@ -134,11 +134,6 @@ class ReasoningChat:
messageinfo = message.message_info
if groupinfo == None and global_config.enable_friend_chat:#如果是私聊
pass
elif groupinfo.group_id not in global_config.talk_allowed_groups:
return
# logger.info("使用推理聊天模式")
# 创建聊天流


@@ -145,10 +145,6 @@ class ThinkFlowChat:
userinfo = message.message_info.user_info
messageinfo = message.message_info
if groupinfo == None and global_config.enable_friend_chat:#如果是私聊
pass
elif groupinfo.group_id not in global_config.talk_allowed_groups:
return
# 创建聊天流
chat = await chat_manager.get_or_create_stream(
@@ -178,16 +174,15 @@ class ThinkFlowChat:
)
timer2 = time.time()
timing_results["记忆激活"] = timer2 - timer1
logger.debug(f"记忆激活: {interested_rate}")
is_mentioned = is_mentioned_bot_in_message(message)
# 计算回复意愿
if global_config.enable_think_flow:
current_willing_old = willing_manager.get_willing(chat_stream=chat)
current_willing_new = (heartflow.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
current_willing = (current_willing_old + current_willing_new) / 2
else:
current_willing = willing_manager.get_willing(chat_stream=chat)
willing_manager.set_willing(chat.stream_id, current_willing)
@@ -203,6 +198,7 @@ class ThinkFlowChat:
)
timer2 = time.time()
timing_results["意愿激活"] = timer2 - timer1
logger.debug(f"意愿激活: {reply_probability}")
# 打印消息信息
mes_name = chat.group_info.group_name if chat.group_info else "私聊"


@@ -24,8 +24,8 @@ config_config = LogConfig(
logger = get_module_logger("config", config=config_config) logger = get_module_logger("config", config=config_config)
#考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 #考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
mai_version_main = "0.6.0" mai_version_main = "test-0.6.0"
mai_version_fix = "mmc-4" mai_version_fix = "snapshot-7"
mai_version = f"{mai_version_main}-{mai_version_fix}" mai_version = f"{mai_version_main}-{mai_version_fix}"
def update_config(): def update_config():
@@ -230,7 +230,8 @@ class BotConfig:
# experimental
enable_friend_chat: bool = False # 是否启用好友聊天
enable_think_flow: bool = False # 是否启用思考流程
# enable_think_flow: bool = False # 是否启用思考流程
enable_pfc_chatting: bool = False # 是否启用PFC聊天
# 模型配置
llm_reasoning: Dict[str, str] = field(default_factory=lambda: {})
@@ -333,7 +334,7 @@ class BotConfig:
personality_config = parent["personality"] personality_config = parent["personality"]
personality = personality_config.get("prompt_personality") personality = personality_config.get("prompt_personality")
if len(personality) >= 2: if len(personality) >= 2:
logger.debug(f"载入自定义人格:{personality}") logger.info(f"载入自定义人格:{personality}")
config.PROMPT_PERSONALITY = personality_config.get("prompt_personality", config.PROMPT_PERSONALITY) config.PROMPT_PERSONALITY = personality_config.get("prompt_personality", config.PROMPT_PERSONALITY)
config.PERSONALITY_1 = personality_config.get("personality_1_probability", config.PERSONALITY_1) config.PERSONALITY_1 = personality_config.get("personality_1_probability", config.PERSONALITY_1)
@@ -563,7 +564,9 @@ class BotConfig:
def experimental(parent: dict):
experimental_config = parent["experimental"]
config.enable_friend_chat = experimental_config.get("enable_friend_chat", config.enable_friend_chat)
config.enable_think_flow = experimental_config.get("enable_think_flow", config.enable_think_flow)
# config.enable_think_flow = experimental_config.get("enable_think_flow", config.enable_think_flow)
if config.INNER_VERSION in SpecifierSet(">=1.1.0"):
config.enable_pfc_chatting = experimental_config.get("pfc_chatting", config.enable_pfc_chatting)
# 版本表达式:>=1.0.0,<2.0.0
# 允许字段func: method, support: str, notice: str, necessary: bool


@@ -1,5 +1,5 @@
[inner]
version = "1.0.4"
version = "1.1.0"
#以下是给开发人员阅读的,一般用户不需要阅读
@@ -149,6 +149,7 @@ enable = true
[experimental]
enable_friend_chat = false # 是否启用好友聊天
pfc_chatting = false # 是否启用PFC聊天
#下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写
#推理模型
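
For users who want to try the feature, a hedged sketch (not part of the commit) of what the relevant blocks of a personal bot_config.toml might look like once this template is adopted: PFC conversations are only created for private chats, so friend chat has to be enabled as well, and pfc_chatting is only parsed when the config schema version is at least 1.1.0.

[inner]
version = "1.1.0"

[experimental]
enable_friend_chat = true   # PFC only triggers for private (friend) chats
pfc_chatting = true         # enable the experimental PFC conversation mode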