Merge pull request #945 from MaiM-with-u/HFC-para

新HfC分支合并 草稿pr
This commit is contained in:
SengokuCola
2025-05-13 22:31:15 +08:00
committed by GitHub
155 changed files with 4609 additions and 4026 deletions

5
.gitignore vendored
View File

@@ -18,6 +18,7 @@ MaiBot-Napcat-Adapter
nonebot-maibot-adapter/
*.zip
run.bat
log_debug/
run_none.bat
run.py
message_queue_content.txt
@@ -26,8 +27,8 @@ message_queue_window.bat
message_queue_window.txt
queue_update.txt
memory_graph.gml
/src/do_tool/tool_can_use/auto_create_tool.py
/src/do_tool/tool_can_use/execute_python_code_tool.py
/src/tools/tool_can_use/auto_create_tool.py
/src/tools/tool_can_use/execute_python_code_tool.py
.env
.env.*
.cursor

2
run_voice.bat Normal file
View File

@@ -0,0 +1,2 @@
@echo off
REM Launch the TTS (voice) adapter pipeline in its own console window.
REM BUG FIX: the echo previously said "Running Napcat Adapter..." (copy-paste
REM leftover from run.bat) although this script starts the voice adapter.
start "Voice Adapter" cmd /k "call conda activate maicore && cd /d C:\GitHub\maimbot_tts_adapter && echo Running Voice Adapter... && python maimbot_pipeline.py"

View File

@@ -10,13 +10,13 @@ from time import sleep
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from src.plugins.knowledge.src.lpmmconfig import PG_NAMESPACE, global_config
from src.plugins.knowledge.src.embedding_store import EmbeddingManager
from src.plugins.knowledge.src.llm_client import LLMClient
from src.plugins.knowledge.src.open_ie import OpenIE
from src.plugins.knowledge.src.kg_manager import KGManager
from src.chat.knowledge.src.lpmmconfig import PG_NAMESPACE, global_config
from src.chat.knowledge.src.embedding_store import EmbeddingManager
from src.chat.knowledge.src.llm_client import LLMClient
from src.chat.knowledge.src.open_ie import OpenIE
from src.chat.knowledge.src.kg_manager import KGManager
from src.common.logger import get_module_logger
from src.plugins.knowledge.src.utils.hash import get_sha256
from src.chat.knowledge.src.utils.hash import get_sha256
# 添加项目根目录到 sys.path

View File

@@ -13,11 +13,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from rich.progress import Progress # 替换为 rich 进度条
from src.common.logger import get_module_logger
from src.plugins.knowledge.src.lpmmconfig import global_config
from src.plugins.knowledge.src.ie_process import info_extract_from_str
from src.plugins.knowledge.src.llm_client import LLMClient
from src.plugins.knowledge.src.open_ie import OpenIE
from src.plugins.knowledge.src.raw_processing import load_raw_data
from src.chat.knowledge.src.lpmmconfig import global_config
from src.chat.knowledge.src.ie_process import info_extract_from_str
from src.chat.knowledge.src.llm_client import LLMClient
from src.chat.knowledge.src.open_ie import OpenIE
from src.chat.knowledge.src.raw_processing import load_raw_data
from rich.progress import (
BarColumn,
TimeElapsedColumn,

View File

@@ -6,7 +6,7 @@ import datetime # 新增导入
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from src.common.logger_manager import get_logger
from src.plugins.knowledge.src.lpmmconfig import global_config
from src.chat.knowledge.src.lpmmconfig import global_config
logger = get_logger("lpmm")
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

View File

@@ -24,14 +24,6 @@
- 目标:提升 `HeartFlowChatInstance` (HFC) 回复的多样性、一致性和真实感。
- 前置:需要重构 Prompt 构建逻辑,可能引入 `PromptBuilder` 并提供标准接口 (认为是必须步骤)。
- **扩展观察系统 (Observation System)**:
- 目前主要依赖 `ChattingObservation` 获取消息。
- 计划引入更多 `Observation` 类型,为 `SubHeartflow` 提供更丰富的上下文:
- Mai 的全局状态 (`MaiStateInfo`)。
- `SubHeartflow` 自身的聊天状态 (`ChatStateInfo`) 和参数配置。
- Mai 的系统配置、连接平台信息。
- 其他相关聊天或系统的聚合信息。
- 目标:让 `SubHeartflow` 基于更全面的信息进行决策。
- **增强工具调用能力 (Enhanced Tool Usage)**:
- 扩展 `HeartFlowChatInstance` (HFC) 可用的工具集。
@@ -59,13 +51,6 @@
- 让 LLM 分析提供的文本材料(如小说、背景故事)来提取人格特质和相关信息。
- **优势**: 替代易出错且标准不一的手动配置,生成更丰富、一致、包含配套资源且易于系统理解和应用的人格包。
- **优化表情包处理与理解 (Enhanced Emoji Handling and Understanding)**:
- **面临挑战**:
- **历史记录表示**: 如何在聊天历史中有效表示表情包,供 LLM 理解。
- **语义理解**: 如何让 LLM 准确把握表情包的含义、情感和语境。
- **场景判断与选择**: 如何让 LLM 判断何时适合使用表情包,并选择最贴切的一个。
- **目标**: 提升 Mai 理解和运用表情包的能力,使交互更自然生动。
- **说明**: 可能需要较多时间进行数据处理和模型调优,但对改善体验潜力巨大。
- **探索高级记忆检索机制 (GE 系统概念):**
- 研究超越简单关键词/近期性检索的记忆模型。

View File

@@ -1,16 +0,0 @@
MaiCore/MaiBot 0.6路线图 draft
0.6.3解决0.6.x版本核心问题改进功能
主要功能加入
LPMM全面替代旧知识库
采用新的HFC回复模式取代旧心流
合并推理模式和心流模式,根据麦麦自己决策回复模式
提供新的表情包系统
0.6.4:提升用户体验,交互优化
加入webui
提供麦麦 API
修复prompt建构的各种问题
修复各种bug
调整代码文件结构,重构部分落后设计

View File

@@ -1,5 +1,5 @@
from src.heart_flow.heartflow import heartflow
from src.heart_flow.sub_heartflow import ChatState
from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState
from src.common.logger_manager import get_logger
logger = get_logger("api")

View File

@@ -34,14 +34,6 @@ class APIBotConfig:
gender: str # 性别
appearance: str # 外貌特征描述
# schedule
enable_schedule_gen: bool # 是否启用日程表
enable_schedule_interaction: bool # 日程表是否影响回复模式
prompt_schedule_gen: str # 日程生成提示词
schedule_doing_update_interval: int # 日程表更新间隔(秒)
schedule_temperature: float # 日程表温度
time_zone: str # 时区
# platforms
platforms: Dict[str, str] # 平台信息
@@ -164,7 +156,6 @@ class APIBotConfig:
"groups",
"personality",
"identity",
"schedule",
"platforms",
"chat",
"normal_chat",

View File

@@ -3,7 +3,7 @@ from strawberry.fastapi import GraphQLRouter
import os
import sys
# from src.heart_flow.heartflow import heartflow
# from src.chat.heart_flow.heartflow import heartflow
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
# from src.config.config import BotConfig
from src.common.logger_manager import get_logger
@@ -15,7 +15,7 @@ from src.api.apiforgui import (
get_subheartflow_cycle_info,
get_all_states,
)
from src.heart_flow.sub_heartflow import ChatState
from src.chat.heart_flow.sub_heartflow import ChatState
from src.api.basic_info_api import get_all_basic_info # 新增导入
# import uvicorn

17
src/chat/__init__.py Normal file
View File

@@ -0,0 +1,17 @@
"""
MaiMBot插件系统
包含聊天、情绪、记忆、日程等功能模块
"""
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.normal_chat.willing.willing_manager import willing_manager
# 导出主要组件供外部使用
__all__ = [
"chat_manager",
"emoji_manager",
"relationship_manager",
"willing_manager",
]

View File

@@ -12,7 +12,7 @@ import re
from ...common.database import db
from ...config.config import global_config
from ..chat.utils_image import image_path_to_base64, image_manager
from ..utils.utils_image import image_path_to_base64, image_manager
from ..models.utils_model import LLMRequest
from src.common.logger_manager import get_logger
from rich.traceback import install

View File

@@ -0,0 +1,216 @@
import os
import time
from typing import List, Dict, Any, Tuple
from src.chat.focus_chat.heartFC_Cycleinfo import CycleInfo
from src.common.logger_manager import get_logger
logger = get_logger("cycle_analyzer")
class CycleAnalyzer:
    """Query and analysis helpers over CycleInfo records stored on disk.

    Layout assumption (see CycleInfo.list_cycles): one sub-directory per chat
    stream under ``base_dir``, each containing per-cycle text files.
    """

    def __init__(self, base_dir: str = "log_debug"):
        """
        Args:
            base_dir: root directory holding the CycleInfo files,
                      default ``log_debug``.
        """
        self.base_dir = base_dir

    def list_streams(self) -> List[str]:
        """Return the IDs (sub-directory names) of every known chat stream."""
        try:
            if not os.path.exists(self.base_dir):
                return []
            return [d for d in os.listdir(self.base_dir) if os.path.isdir(os.path.join(self.base_dir, d))]
        except Exception as e:
            logger.error(f"获取聊天流列表时出错: {e}")
            return []

    def get_stream_cycle_count(self, stream_id: str) -> int:
        """Return the number of recorded cycles for one chat stream (0 on error)."""
        try:
            files = CycleInfo.list_cycles(stream_id, self.base_dir)
            return len(files)
        except Exception as e:
            logger.error(f"获取聊天流循环数量时出错: {e}")
            return 0

    def get_stream_cycles(self, stream_id: str, start: int = 0, limit: int = -1) -> List[str]:
        """Return cycle file paths for a stream.

        Args:
            stream_id: chat stream ID.
            start: index of the first file to return (default 0).
            limit: maximum number of files; a negative value means "all".
        """
        try:
            files = CycleInfo.list_cycles(stream_id, self.base_dir)
            if limit < 0:
                return files[start:]
            else:
                return files[start : start + limit]
        except Exception as e:
            logger.error(f"获取聊天流循环文件列表时出错: {e}")
            return []

    def get_cycle_content(self, filepath: str) -> str:
        """Read one cycle file; on failure return a human-readable error string."""
        try:
            if not os.path.exists(filepath):
                return f"文件不存在: {filepath}"
            with open(filepath, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e:
            logger.error(f"读取循环文件内容时出错: {e}")
            return f"读取文件出错: {e}"

    def analyze_stream_cycles(self, stream_id: str) -> Dict[str, Any]:
        """Aggregate statistics (action counts, durations, tool usage) over all
        cycles of one stream.

        Returns a dict with Chinese stat keys, or ``{"error": ...}`` on failure.
        """
        try:
            files = CycleInfo.list_cycles(stream_id, self.base_dir)
            if not files:
                return {"error": "没有找到循环记录"}

            total_cycles = len(files)
            action_counts = {"text_reply": 0, "emoji_reply": 0, "no_reply": 0, "unknown": 0}
            total_duration = 0
            tool_usage = {}

            for filepath in files:
                with open(filepath, "r", encoding="utf-8") as f:
                    content = f.read()

                for line in content.split("\n"):
                    # "动作:" line -> count the action type.
                    if line.startswith("动作:"):
                        action = line[3:].strip()
                        action_counts[action] = action_counts.get(action, 0) + 1
                    # "耗时:" line -> accumulate the duration in seconds.
                    elif line.startswith("耗时:"):
                        try:
                            # BUG FIX: the original used line[3:].strip().split("")
                            # — str.split with an empty separator always raises
                            # ValueError, so no duration was ever accumulated.
                            # The value is assumed to be written as "<seconds>秒";
                            # split on the unit suffix instead.
                            # TODO(review): confirm the suffix against the format
                            # CycleInfo actually writes.
                            duration = float(line[3:].strip().split("秒")[0])
                            total_duration += duration
                        except Exception as e:
                            logger.error(f"解析耗时时出错: {e}")
                    # "使用的工具:" line -> count each tool occurrence.
                    elif line.startswith("使用的工具:"):
                        tools = line[6:].strip().split(", ")
                        for tool in tools:
                            tool_usage[tool] = tool_usage.get(tool, 0) + 1

            avg_duration = total_duration / total_cycles if total_cycles > 0 else 0

            return {
                "总循环数": total_cycles,
                "动作统计": action_counts,
                "平均耗时": f"{avg_duration:.2f}",
                "总耗时": f"{total_duration:.2f}",
                "工具使用次数": tool_usage,
            }
        except Exception as e:
            logger.error(f"分析聊天流循环时出错: {e}")
            return {"error": f"分析出错: {e}"}

    def get_latest_cycles(self, count: int = 10) -> List[Tuple[str, str]]:
        """Return the newest ``count`` cycles across all streams.

        Returns:
            List of (stream_id, filepath) tuples, newest first.
        """
        try:
            all_cycles = []
            streams = self.list_streams()
            for stream_id in streams:
                files = CycleInfo.list_cycles(stream_id, self.base_dir)
                for filepath in files:
                    try:
                        # File names are expected to end with _%Y%m%d_%H%M%S —
                        # extract that timestamp for sorting.
                        filename = os.path.basename(filepath)
                        timestamp_str = filename.split("_", 2)[2].split(".")[0]
                        timestamp = time.mktime(time.strptime(timestamp_str, "%Y%m%d_%H%M%S"))
                        all_cycles.append((timestamp, stream_id, filepath))
                    except Exception as e:
                        logger.error(f"从文件名中提取时间戳时出错: {e}")
                        continue

            # Sort by timestamp descending and keep the top `count`.
            all_cycles.sort(reverse=True)
            return [(item[1], item[2]) for item in all_cycles[:count]]
        except Exception as e:
            logger.error(f"获取最新循环时出错: {e}")
            return []
# 使用示例
def _demo() -> None:
    """Small smoke-test walk-through of the CycleAnalyzer API."""
    analyzer = CycleAnalyzer()

    # Enumerate every chat stream that has recorded cycles.
    streams = analyzer.list_streams()
    print(f"找到 {len(streams)} 个聊天流: {streams}")

    # Analyse the first stream, if any exists.
    if streams:
        stream_id = streams[0]
        stats = analyzer.analyze_stream_cycles(stream_id)
        print(f"\n聊天流 {stream_id} 的统计信息:")
        for key, value in stats.items():
            print(f" {key}: {value}")

        # Show the most recent cycle of that stream.
        cycles = analyzer.get_stream_cycles(stream_id, limit=1)
        if cycles:
            print("\n最新循环内容:")
            print(analyzer.get_cycle_content(cycles[0]))

    # Finally, the three newest cycles across all streams.
    latest_cycles = analyzer.get_latest_cycles(3)
    print(f"\n所有聊天流中最新的 {len(latest_cycles)} 个循环:")
    for stream_id, filepath in latest_cycles:
        print(f" 聊天流 {stream_id}: {os.path.basename(filepath)}")


if __name__ == "__main__":
    _demo()

View File

@@ -0,0 +1,345 @@
import traceback
from typing import List, Optional, Dict, Any, Tuple
from src.chat.message_receive.message import MessageRecv, MessageThinking, MessageSending
from src.chat.message_receive.message import Seg # Local import needed after move
from src.chat.message_receive.message import UserInfo
from src.chat.message_receive.chat_stream import chat_manager
from src.common.logger_manager import get_logger
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.utils_image import image_path_to_base64 # Local import needed after move
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
from src.manager.mood_manager import mood_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
logger = get_logger("expressor")
class DefaultExpressor:
    """Focus-chat reply pipeline for one chat stream.

    Bundles three responsibilities that run once per reply action:
      * replier  — express(): builds the prompt and asks the LLM for text;
      * emoji    — _choose_emoji(): optionally picks a matching emoji;
      * sender   — _send_response_messages(): splits the reply into segments
                   and sends them anchored to the triggering message.
    """

    def __init__(self, chat_id: str):
        self.log_prefix = "expressor"
        # LLM used for the final wording; its temperature is re-derived from
        # the current mood on every express() call.
        self.express_model = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=256,
            request_type="response_heartflow",
        )
        self.heart_fc_sender = HeartFCSender()
        self.chat_id = chat_id
        self.chat_stream: Optional[ChatStream] = None
        # Defaults until initialize() resolves the actual chat type/target.
        self.is_group_chat = True
        self.chat_target_info = None

    async def initialize(self):
        """Resolve whether this is a group chat and, if private, who the peer is."""
        self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)

    async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str):
        """Register a "thinking" placeholder message anchored to anchor_message.

        Returns None (and logs) when the anchor or its chat stream is missing.
        """
        if not anchor_message or not anchor_message.chat_stream:
            logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。")
            return None

        chat = anchor_message.chat_stream
        messageinfo = anchor_message.message_info
        thinking_time_point = parse_thinking_id_to_timestamp(thinking_id)
        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            platform=messageinfo.platform,
        )

        thinking_message = MessageThinking(
            message_id=thinking_id,
            chat_stream=chat,
            bot_user_info=bot_user_info,
            reply=anchor_message,  # the thinking message replies to the anchor
            thinking_start_time=thinking_time_point,
        )
        logger.debug(f"创建思考消息thinking_message{thinking_message}")

        await self.heart_fc_sender.register_thinking(thinking_message)

    async def deal_reply(
        self,
        cycle_timers: dict,
        action_data: Dict[str, Any],
        reasoning: str,
        anchor_message: MessageRecv,
        thinking_id: str,
    ) -> tuple[bool, Optional[List[Tuple[str, str]]]]:
        """Run one full reply action: generate text, pick an emoji, send.

        Args:
            cycle_timers: dict collecting per-phase Timer durations.
            action_data: planner output; reads "text", "emojis" and "target".
            reasoning: planner reasoning forwarded to the replier.
            anchor_message: message the reply is anchored to.
            thinking_id: ID of the registered thinking message.

        Returns:
            (sent_anything, reply_segments) where reply_segments is a list of
            (type, data) tuples or None on failure.
        """
        await self._create_thinking_message(anchor_message, thinking_id)

        reply = None  # stays defined even if text generation is skipped/fails
        try:
            has_sent_something = False

            # --- text part ---
            text_part = action_data.get("text", [])
            if text_part:
                with Timer("生成回复", cycle_timers):
                    reply = await self.express(
                        in_mind_reply=text_part,
                        anchor_message=anchor_message,
                        thinking_id=thinking_id,
                        reason=reasoning,
                        action_data=action_data,
                    )

            # --- emoji part ---
            with Timer("选择表情", cycle_timers):
                emoji_keyword = action_data.get("emojis", [])
                emoji_base64 = await self._choose_emoji(emoji_keyword)
                if emoji_base64:
                    # BUG FIX: previously `reply.append(...)` raised
                    # AttributeError when there was no text part (reply was
                    # None), so emoji-only replies could never be sent.
                    if reply is None:
                        reply = []
                    reply.append(("emoji", emoji_base64))

            if reply:
                with Timer("发送消息", cycle_timers):
                    await self._send_response_messages(
                        anchor_message=anchor_message,
                        thinking_id=thinking_id,
                        response_set=reply,
                    )
                has_sent_something = True
            else:
                logger.warning(f"{self.log_prefix} 文本回复生成失败")

            if not has_sent_something:
                logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")

            return has_sent_something, reply
        except Exception as e:
            logger.error(f"回复失败: {e}")
            return False, None

    # --- 回复器 (Replier) 的定义 --- #
    async def express(
        self,
        in_mind_reply: str,
        reason: str,
        anchor_message: MessageRecv,
        thinking_id: str,
        action_data: Dict[str, Any],
    ) -> Optional[List[Tuple[str, str]]]:
        """Replier: generate the reply text for this cycle.

        (Absorbs the former HeartFCGenerator.)

        Returns:
            List of ("text", segment) tuples, or None on any failure.
            NOTE: the original annotated List[str], but the method has always
            returned a list of tuples — annotation corrected.
        """
        try:
            # 1. Scale the model temperature by the current emotional arousal.
            arousal_multiplier = mood_manager.get_arousal_multiplier()
            current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier
            self.express_model.params["temperature"] = current_temp

            # 2. Info catcher records prompt/response for later inspection.
            info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

            # Determine the sender name used in the prompt (private chats only).
            sender_name_for_prompt = "某人"  # default for group / missing info
            if not self.is_group_chat and self.chat_target_info:
                # Prefer the person name, then the platform nickname.
                sender_name_for_prompt = (
                    self.chat_target_info.get("person_name")
                    or self.chat_target_info.get("user_nickname")
                    or sender_name_for_prompt
                )

            target_message = action_data.get("target", "")

            # 3. Build the prompt.
            with Timer("构建Prompt", {}):  # internal timer, optional
                prompt = await prompt_builder.build_prompt(
                    build_mode="focus",
                    chat_stream=self.chat_stream,
                    in_mind_reply=in_mind_reply,
                    reason=reason,
                    current_mind_info="",
                    structured_info="",
                    sender_name=sender_name_for_prompt,
                    target_message=target_message,
                )

            # 4. Call the LLM.
            content = None
            reasoning_content = None
            model_name = "unknown_model"
            if not prompt:
                logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
                return None

            try:
                with Timer("LLM生成", {}):  # internal timer, optional
                    content, reasoning_content, model_name = await self.express_model.generate_response(prompt)

                logger.info(f"{self.log_prefix}\nPrompt:\n{prompt}\n---------------------------\n")
                logger.info(f"想要表达:{in_mind_reply}")
                logger.info(f"理由:{reason}")
                logger.info(f"生成回复: {content}\n")

                info_catcher.catch_after_llm_generated(
                    prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
                )
            except Exception as llm_e:
                # Keep the error message short on purpose.
                logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")
                return None

            # 5. Post-process the LLM output.
            # BUG FIX: check `content` for emptiness before handing it to
            # process_llm_response (the original processed first, checked after).
            if not content:
                logger.warning(f"{self.log_prefix}LLM 生成了空内容。")
                return None
            processed_response = process_llm_response(content)
            if not processed_response:
                logger.warning(f"{self.log_prefix}处理后的回复为空。")
                return None

            # Wrap each text segment in the (type, data) shape the sender expects.
            reply_set = []
            for segment in processed_response:  # renamed: `str` shadowed the builtin
                reply_set.append(("text", segment))
            return reply_set

        except Exception as e:
            logger.error(f"{self.log_prefix}回复生成意外失败: {e}")
            traceback.print_exc()
            return None

    # --- 发送器 (Sender) --- #
    async def _send_response_messages(
        self, anchor_message: Optional[MessageRecv], response_set: List[Tuple[str, str]], thinking_id: str
    ) -> Optional[MessageSending]:
        """Send the reply segments anchored to anchor_message via HeartFCSender.

        Returns the first successfully built message, or None if preconditions
        fail (no chat stream / anchor / active thinking state).
        """
        chat = self.chat_stream
        chat_id = self.chat_id
        if chat is None:
            logger.error(f"{self.log_prefix} 无法发送回复chat_stream 为空。")
            return None
        if not anchor_message:
            logger.error(f"{self.log_prefix} 无法发送回复anchor_message 为空。")
            return None

        stream_name = chat_manager.get_stream_name(chat_id) or chat_id  # for log output

        # Abort if the thinking phase already ended (or was never registered).
        thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id)
        if thinking_start_time is None:
            logger.error(f"[{stream_name}]思考过程未找到或已结束,无法发送回复。")
            return None

        mark_head = False
        first_bot_msg: Optional[MessageSending] = None
        reply_message_ids = []  # IDs of the segments actually sent
        for i, msg_text in enumerate(response_set):
            # Unique ID per message segment.  (Locals renamed from `type`/`data`
            # to avoid shadowing the builtins.)
            seg_type = msg_text[0]
            seg_data = msg_text[1]
            part_message_id = f"{thinking_id}_{i}"
            message_segment = Seg(type=seg_type, data=seg_data)

            is_emoji = seg_type == "emoji"
            # Only the first segment replies directly to the anchor.
            reply_to = not mark_head

            bot_message = await self._build_single_sending_message(
                anchor_message=anchor_message,
                message_id=part_message_id,
                message_segment=message_segment,
                reply_to=reply_to,
                is_emoji=is_emoji,
                thinking_id=thinking_id,
            )

            try:
                if not mark_head:
                    mark_head = True
                    first_bot_msg = bot_message  # first successfully built segment
                    typing = False
                else:
                    typing = True
                if seg_type == "emoji":
                    typing = False  # no typing indicator for emoji segments

                await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing)
                reply_message_ids.append(part_message_id)
            except Exception as e:
                logger.error(f"{self.log_prefix}发送回复片段 {i} ({part_message_id}) 时失败: {e}")
                # Keep trying the remaining segments.

        # Regardless of per-segment failures, close the original thinking state.
        try:
            await self.heart_fc_sender.complete_thinking(chat_id, thinking_id)
        except Exception as e:
            logger.error(f"{self.log_prefix}完成思考状态 {thinking_id} 时出错: {e}")

        return first_bot_msg

    async def _choose_emoji(self, send_emoji: str):
        """Pick an emoji matching the given text; return its base64 ("" if none)."""
        emoji_base64 = ""
        emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)
        if emoji_raw:
            emoji_path, _description = emoji_raw
            emoji_base64 = image_path_to_base64(emoji_path)
        return emoji_base64

    async def _build_single_sending_message(
        self,
        anchor_message: MessageRecv,
        message_id: str,
        message_segment: Seg,
        reply_to: bool,
        is_emoji: bool,
        thinking_id: str,
    ) -> MessageSending:
        """Build one outgoing MessageSending for a single reply segment."""
        thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(self.chat_id, thinking_id)

        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            platform=self.chat_stream.platform,
        )

        bot_message = MessageSending(
            message_id=message_id,  # unique per-segment ID
            chat_stream=self.chat_stream,
            bot_user_info=bot_user_info,
            sender_info=anchor_message.message_info.user_info,
            message_segment=message_segment,
            reply=anchor_message,  # reply to the original anchor
            is_head=reply_to,
            is_emoji=is_emoji,
            thinking_start_time=thinking_start_time,  # propagate original start time
        )
        return bot_message

View File

@@ -0,0 +1,320 @@
import time
import random
from typing import List, Dict, Optional, Any, Tuple
from src.common.logger_manager import get_logger
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_readable_messages
from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager
import os
import json
MAX_EXPRESSION_COUNT = 300
logger = get_logger("expressor")
def init_prompt() -> None:
    """Register the three prompt templates used by ExpressionLearner.

    Registered by name with the global prompt manager (looked up later via
    ``global_prompt_manager.format_prompt``):
      * ``learn_style_prompt``            — summarise language style from a chat log
      * ``personality_expression_prompt`` — derive style rules from the persona text
      * ``learn_grammar_prompt``          — summarise sentence/grammar habits

    NOTE: the template bodies below are Chinese LLM prompt content; their exact
    wording is behavior and must not be altered casually.
    """
    # Template: learn speaking style from a chat excerpt ({chat_str} placeholder).
    learn_style_prompt = """
{chat_str}
请从上面这段群聊中概括除了人名为"麦麦"之外的人的语言风格,只考虑文字,不要考虑表情包和图片
不要涉及具体的人名,只考虑语言风格
语言风格包含特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
"xxx"时,可以"xxx", xxx不超过10个字
例如:
"表示十分惊叹"时,使用"我嘞个xxxx"
"表示讽刺的赞同,不想讲道理"时,使用"对对对"
"想说明某个观点,但懒得明说",使用"懂的都懂"
注意不要总结你自己的发言
现在请你概括
"""
    Prompt(learn_style_prompt, "learn_style_prompt")
    # Template: derive style rules from the persona text ({personality} placeholder).
    personality_expression_prompt = """
{personality}
请从以上人设中总结出这个角色可能的语言风格
思考回复的特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
"xxx"时,可以"xxx", xxx不超过10个字
例如:
"表示十分惊叹"时,使用"我嘞个xxxx"
"表示讽刺的赞同,不想讲道理"时,使用"对对对"
"想说明某个观点,但懒得明说",使用"懂的都懂"
现在请你概括
"""
    Prompt(personality_expression_prompt, "personality_expression_prompt")
    # Template: learn grammar/sentence-shape habits from a chat excerpt.
    learn_grammar_prompt = """
{chat_str}
请从上面这段群聊中概括除了人名为"麦麦"之外的人的语法和句法特点,只考虑纯文字,不要考虑表情包和图片
不要总结【图片】,【动画表情】,[图片][动画表情],不总结 表情符号 at @ 回复 和[回复]
不要涉及具体的人名,只考虑语法和句法特点,
语法和句法特点要包括,句子长短(具体字数),有何种语病,如何拆分句子。
总结成如下格式的规律,总结的内容要简洁,不浮夸:
"xxx"时,可以"xxx"
例如:
"表达观点较复杂"时,使用"省略主语"的句法
"不用详细说明的一般表达"时,使用"非常简洁的句子"的句法
"需要单纯简单的确认"时,使用"单字或几个字的肯定"的句法
注意不要总结你自己的发言
现在请你概括
"""
    Prompt(learn_grammar_prompt, "learn_grammar_prompt")
class ExpressionLearner:
    """Learns "expression" rules (situation -> phrasing) from recent chat
    history and from the configured personality, and persists them as JSON
    under data/expression/.
    """

    def __init__(self) -> None:
        # Low temperature on purpose: summarisation should be stable, not creative.
        self.express_learn_model: LLMRequest = LLMRequest(
            model=global_config.llm_normal,
            temperature=0.1,
            max_tokens=256,
            request_type="response_heartflow",
        )

    async def get_expression_by_chat_id(
        self, chat_id: str
    ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
        """Load stored expressions for one chat.

        Reads data/expression/learnt_style/{chat_id}/expressions.json,
        data/expression/learnt_grammar/{chat_id}/expressions.json and
        data/expression/personality/expressions.json (missing files -> []).

        Returns:
            (learnt_style_expressions, learnt_grammar_expressions,
            personality_expressions).
            BUG FIX: the original annotation claimed a 2-tuple although the
            method has always returned three lists — annotation corrected.
        """
        learnt_style_file = os.path.join("data", "expression", "learnt_style", str(chat_id), "expressions.json")
        learnt_grammar_file = os.path.join("data", "expression", "learnt_grammar", str(chat_id), "expressions.json")
        personality_file = os.path.join("data", "expression", "personality", "expressions.json")

        learnt_style_expressions = []
        learnt_grammar_expressions = []
        personality_expressions = []

        if os.path.exists(learnt_style_file):
            with open(learnt_style_file, "r", encoding="utf-8") as f:
                learnt_style_expressions = json.load(f)
        if os.path.exists(learnt_grammar_file):
            with open(learnt_grammar_file, "r", encoding="utf-8") as f:
                learnt_grammar_expressions = json.load(f)
        if os.path.exists(personality_file):
            with open(personality_file, "r", encoding="utf-8") as f:
                personality_expressions = json.load(f)

        return learnt_style_expressions, learnt_grammar_expressions, personality_expressions

    def is_similar(self, s1: str, s2: str) -> bool:
        """Positional similarity check.

        True only when both strings are at least 5 chars (shorter one) and more
        than 80% of the aligned positions match.  Substrings do not count.
        """
        if not s1 or not s2:
            return False
        min_len = min(len(s1), len(s2))
        if min_len < 5:
            return False
        same = sum(1 for a, b in zip(s1, s2) if a == b)
        return same / min_len > 0.8

    async def learn_and_store_expression(self):
        """Learn and store both language style and grammar expressions.

        Returns (learnt_style, learnt_grammar) on success; [] when either step
        produced nothing (kept as-is for backward compatibility with callers).
        """
        learnt_style: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="style", num=3)
        if not learnt_style:
            return []
        learnt_grammar: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="grammar", num=2)
        if not learnt_grammar:
            return []
        return learnt_style, learnt_grammar

    async def learn_and_store(self, type: str, num: int = 10) -> List[Tuple[str, str, str]]:
        """Learn expressions from randomly sampled recent messages and persist them.

        Args:
            type: "style" or "grammar".
            num: how many random messages to sample.

        Raises:
            ValueError: for any other ``type``.
        """
        if type == "style":
            type_str = "语言风格"
        elif type == "grammar":
            type_str = "句法特点"
        else:
            raise ValueError(f"Invalid type: {type}")

        logger.info(f"开始学习{type_str}...")
        learnt_expressions: Optional[List[Tuple[str, str, str]]] = await self.learn_expression(type, num)
        logger.info(f"学习到{len(learnt_expressions) if learnt_expressions else 0}个{type_str}")
        # learnt_expressions: List[(chat_id, situation, style)]
        if not learnt_expressions:
            logger.info(f"没有学习到{type_str}")
            return []

        # Group the learnt tuples by chat_id.
        chat_dict: Dict[str, List[Dict[str, str]]] = {}
        for chat_id, situation, style in learnt_expressions:
            if chat_id not in chat_dict:
                chat_dict[chat_id] = []
            chat_dict[chat_id].append({"situation": situation, "style": style})

        # Persist to data/expression/learnt_{type}/{chat_id}/expressions.json.
        for chat_id, expr_list in chat_dict.items():
            dir_path = os.path.join("data", "expression", f"learnt_{type}", str(chat_id))
            os.makedirs(dir_path, exist_ok=True)
            file_path = os.path.join(dir_path, "expressions.json")

            # Merge with any previously stored data.
            # BUG FIX: annotation was List[Dict[str, str, str]] — Dict takes
            # exactly two type parameters; items also carry an int "count".
            old_data: List[Dict[str, Any]] = []
            if os.path.exists(file_path):
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        old_data = json.load(f)
                except Exception:
                    old_data = []

            # When over the cap, drop count==1 entries with 20% probability each.
            if len(old_data) >= MAX_EXPRESSION_COUNT:
                new_old_data = []
                for item in old_data:
                    if item.get("count", 1) == 1 and random.random() < 0.2:
                        continue  # 20% chance to evict
                    new_old_data.append(item)
                old_data = new_old_data

            # Merge: similar entries bump the count (and 50% of the time are
            # replaced by the new wording); unseen entries are appended.
            for new_expr in expr_list:
                found = False
                for old_expr in old_data:
                    if self.is_similar(new_expr["situation"], old_expr.get("situation", "")) and self.is_similar(
                        new_expr["style"], old_expr.get("style", "")
                    ):
                        found = True
                        if random.random() < 0.5:
                            old_expr["situation"] = new_expr["situation"]
                            old_expr["style"] = new_expr["style"]
                        old_expr["count"] = old_expr.get("count", 1) + 1
                        break
                if not found:
                    new_expr["count"] = 1
                    old_data.append(new_expr)

            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(old_data, f, ensure_ascii=False, indent=2)

        return learnt_expressions

    async def learn_expression(self, type: str, num: int = 10) -> Optional[List[Tuple[str, str, str]]]:
        """Sample ``num`` random messages from the last 24 hours and have the
        LLM summarise their expression patterns.

        Args:
            type: "style" or "grammar".
            num: number of messages to sample.

        Returns:
            List of (chat_id, situation, style) tuples, or None when there is
            nothing to learn from or the LLM call fails.
        """
        if type == "style":
            type_str = "语言风格"
            prompt_name = "learn_style_prompt"
        elif type == "grammar":
            type_str = "句法特点"
            prompt_name = "learn_grammar_prompt"
        else:
            raise ValueError(f"Invalid type: {type}")

        current_time = time.time()
        # NOTE: the window is 24 hours (3600 * 24), although older comments
        # said "1 hour" — behavior kept, comments corrected.
        random_msg: Optional[List[Dict[str, Any]]] = get_raw_msg_by_timestamp_random(
            current_time - 3600 * 24, current_time, limit=num
        )
        if not random_msg:
            return None

        # Render the sampled messages to readable text.
        chat_id: str = random_msg[0]["chat_id"]
        random_msg_str: str = await build_readable_messages(random_msg, timestamp_mode="normal")
        # Renamed from reusing `prompt` for both the template name and the
        # rendered text — clearer and avoids accidental misuse.
        prompt: str = await global_prompt_manager.format_prompt(
            prompt_name,
            chat_str=random_msg_str,
        )
        logger.debug(f"学习{type_str}的prompt: {prompt}")

        try:
            response, _ = await self.express_learn_model.generate_response_async(prompt)
        except Exception as e:
            logger.error(f"学习{type_str}失败: {e}")
            return None
        logger.debug(f"学习{type_str}的response: {response}")

        expressions: List[Tuple[str, str, str]] = self.parse_expression_response(response, chat_id)
        return expressions

    def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]:
        """Parse the LLM style summary into (chat_id, situation, style) tuples.

        Each useful line is expected to look like:  "situation"时,使用"style"
        Lines that do not contain both quoted segments are skipped.
        """
        expressions: List[Tuple[str, str, str]] = []
        for line in response.splitlines():
            line = line.strip()
            if not line:
                continue
            # First quoted segment -> situation.
            idx_quote1 = line.find('"')
            if idx_quote1 == -1:
                continue
            idx_quote2 = line.find('"', idx_quote1 + 1)
            if idx_quote2 == -1:
                continue
            # BUG FIX: the original sliced from two characters past the opening
            # quote, silently dropping the first character of every situation.
            situation = line[idx_quote1 + 1 : idx_quote2]
            # Quoted segment after 使用 -> style.
            idx_use = line.find('使用"', idx_quote2)
            if idx_use == -1:
                continue
            idx_quote3 = idx_use + 2  # index of the opening quote after 使用
            idx_quote4 = line.find('"', idx_quote3 + 1)
            if idx_quote4 == -1:
                continue
            style = line[idx_quote3 + 1 : idx_quote4]
            expressions.append((chat_id, situation, style))
        return expressions

    async def extract_and_store_personality_expressions(self):
        """Derive expression rules from the configured personality text.

        Creates data/expression/personality/ if needed, asks the LLM for style
        rules, stores them with count=100 (at most 50 entries, randomly pruned)
        in expressions.json.
        """
        dir_path = os.path.join("data", "expression", "personality")
        os.makedirs(dir_path, exist_ok=True)
        file_path = os.path.join(dir_path, "expressions.json")

        # Build the prompt from the persona description.
        prompt = await global_prompt_manager.format_prompt(
            "personality_expression_prompt",
            personality=global_config.expression_style,
        )
        logger.info(f"个性表达方式提取prompt: {prompt}")
        try:
            response, _ = await self.express_learn_model.generate_response_async(prompt)
        except Exception as e:
            logger.error(f"个性表达方式提取失败: {e}")
            return
        logger.info(f"个性表达方式提取response: {response}")

        # Use the fixed chat_id "personality" for persona-derived rules.
        expressions = self.parse_expression_response(response, "personality")
        # Convert to dicts with a high count so they out-rank learnt rules.
        result = []
        for _, situation, style in expressions:
            result.append({"situation": situation, "style": style, "count": 100})
        # Cap at 50 entries, removing surplus ones at random.
        if len(result) > 50:
            remove_count = len(result) - 50
            remove_indices = set(random.sample(range(len(result)), remove_count))
            result = [item for idx, item in enumerate(result) if idx not in remove_indices]

        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=2)
        logger.info(f"已写入{len(result)}条表达到{file_path}")
# Register the prompt templates at import time so they exist before any
# ExpressionLearner method formats them by name.
init_prompt()
# Module-level singleton shared by the rest of the chat system.
expression_learner = ExpressionLearner()

View File

@@ -0,0 +1,307 @@
import time
import os
import json
from typing import List, Optional, Dict, Any
class CycleDetail:
    """Record of a single HFC (HeartFChatting) think/plan/execute cycle.

    Collects timing data, the planner's decision, and the intermediate outputs
    of the SubMind / ToolUse / Planner stages, and can serialize the whole
    record to a human-readable debug file under ``log_debug/<stream_id>/``.
    """

    def __init__(self, cycle_id: int):
        self.cycle_id = cycle_id
        self.start_time = time.time()
        self.end_time: Optional[float] = None
        self.action_taken = False
        self.action_type = "unknown"
        # Payload attached to the chosen action (e.g. reply text / emojis).
        # Initialized here so the attribute always exists even when
        # set_action_info() is never called (previously it was created only
        # inside set_action_info, risking AttributeError on early access).
        self.action_data: Optional[Dict[str, Any]] = None
        self.reasoning = ""
        self.timers: Dict[str, float] = {}
        self.thinking_id = ""
        self.replanned = False
        # Response-related fields
        self.response_info: Dict[str, Any] = {
            "response_text": [],  # list of reply text segments
            "emoji_info": "",  # emoji information
            "anchor_message_id": "",  # ID of the anchor message
            "reply_message_ids": [],  # IDs of the sent reply messages
            "sub_mind_thinking": "",  # SubMind thinking content
            "in_mind_reply": [],  # in-mind reply content
        }
        # SubMind-related information
        self.submind_info: Dict[str, Any] = {
            "prompt": "",  # prompt fed to SubMind
            "structured_info": "",  # structured information
            "result": "",  # SubMind thinking result
        }
        # ToolUse-related information
        self.tooluse_info: Dict[str, Any] = {
            "prompt": "",  # prompt used for tool invocation
            "tools_used": [],  # which tools were used
            "tool_results": [],  # information obtained from the tools
        }
        # Planner-related information
        self.planner_info: Dict[str, Any] = {
            "prompt": "",  # planner prompt
            "response": "",  # raw planner reply
            "parsed_result": {},  # parsed result
        }

    def to_dict(self) -> Dict[str, Any]:
        """Convert the cycle record into a plain dictionary."""
        return {
            "cycle_id": self.cycle_id,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "action_taken": self.action_taken,
            "action_type": self.action_type,
            "action_data": self.action_data,  # exported as well (was missing)
            "reasoning": self.reasoning,
            "timers": self.timers,
            "thinking_id": self.thinking_id,
            "response_info": self.response_info,
            "submind_info": self.submind_info,
            "tooluse_info": self.tooluse_info,
            "planner_info": self.planner_info,
        }

    def complete_cycle(self):
        """Mark the cycle as finished by recording the end time."""
        self.end_time = time.time()

    def set_action_info(
        self, action_type: str, reasoning: str, action_taken: bool, action_data: Optional[Dict[str, Any]] = None
    ):
        """Set the planner's action decision for this cycle."""
        self.action_type = action_type
        self.action_data = action_data
        self.reasoning = reasoning
        self.action_taken = action_taken

    def set_thinking_id(self, thinking_id: str):
        """Set the thinking-message ID for this cycle."""
        self.thinking_id = thinking_id

    def set_response_info(
        self,
        response_text: Optional[List[str]] = None,
        emoji_info: Optional[str] = None,
        anchor_message_id: Optional[str] = None,
        reply_message_ids: Optional[List[str]] = None,
        sub_mind_thinking: Optional[str] = None,
    ):
        """Update response info; only fields that are not None are overwritten."""
        if response_text is not None:
            self.response_info["response_text"] = response_text
        if emoji_info is not None:
            self.response_info["emoji_info"] = emoji_info
        if anchor_message_id is not None:
            self.response_info["anchor_message_id"] = anchor_message_id
        if reply_message_ids is not None:
            self.response_info["reply_message_ids"] = reply_message_ids
        if sub_mind_thinking is not None:
            self.response_info["sub_mind_thinking"] = sub_mind_thinking

    def set_submind_info(
        self,
        prompt: Optional[str] = None,
        structured_info: Optional[str] = None,
        result: Optional[str] = None,
    ):
        """Update SubMind info; only fields that are not None are overwritten."""
        if prompt is not None:
            self.submind_info["prompt"] = prompt
        if structured_info is not None:
            self.submind_info["structured_info"] = structured_info
        if result is not None:
            self.submind_info["result"] = result

    def set_tooluse_info(
        self,
        prompt: Optional[str] = None,
        tools_used: Optional[List[str]] = None,
        tool_results: Optional[List[Dict[str, Any]]] = None,
    ):
        """Update ToolUse info; only fields that are not None are overwritten."""
        if prompt is not None:
            self.tooluse_info["prompt"] = prompt
        if tools_used is not None:
            self.tooluse_info["tools_used"] = tools_used
        if tool_results is not None:
            self.tooluse_info["tool_results"] = tool_results

    def set_planner_info(
        self,
        prompt: Optional[str] = None,
        response: Optional[str] = None,
        parsed_result: Optional[Dict[str, Any]] = None,
    ):
        """Update Planner info; only fields that are not None are overwritten."""
        if prompt is not None:
            self.planner_info["prompt"] = prompt
        if response is not None:
            self.planner_info["response"] = response
        if parsed_result is not None:
            self.planner_info["parsed_result"] = parsed_result

    @staticmethod
    def save_to_file(cycle_info: "CycleDetail", stream_id: str, base_dir: str = "log_debug") -> str:
        """
        Write a CycleDetail to a human-readable debug file.

        Args:
            cycle_info: the CycleDetail object to serialize
            stream_id: chat stream ID (used as the subdirectory name)
            base_dir: base directory, defaults to "log_debug"

        Returns:
            str: path of the written file, or "" on failure (errors are
            printed, never raised, so logging can't crash the chat loop).
        """
        try:
            # Create the directory structure: <base_dir>/<stream_id>/
            stream_dir = os.path.join(base_dir, stream_id)
            os.makedirs(stream_dir, exist_ok=True)
            # Build the filename from the cycle id and its start time
            timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime(cycle_info.start_time))
            filename = f"cycle_{cycle_info.cycle_id}_{timestamp}.txt"
            filepath = os.path.join(stream_dir, filename)
            # Format the record into a readable report
            with open(filepath, "w", encoding="utf-8") as f:
                # Basic information
                f.write(f"循环ID: {cycle_info.cycle_id}\n")
                f.write(f"开始时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.start_time))}\n")
                if cycle_info.end_time:
                    f.write(f"结束时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.end_time))}\n")
                    duration = cycle_info.end_time - cycle_info.start_time
                    f.write(f"耗时: {duration:.2f}秒\n")
                f.write(f"动作: {cycle_info.action_type}\n")
                f.write(f"原因: {cycle_info.reasoning}\n")
                f.write(f"执行状态: {'已执行' if cycle_info.action_taken else '未执行'}\n")
                f.write(f"思考ID: {cycle_info.thinking_id}\n")
                # Bug fix: both ternary branches were empty strings, so the
                # replanned flag was never visible in the report.
                f.write(f"是否为重新规划: {'是' if cycle_info.replanned else '否'}\n\n")
                # Timer information
                if cycle_info.timers:
                    f.write("== 计时器信息 ==\n")
                    for name, elapsed in cycle_info.timers.items():
                        formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
                        f.write(f"{name}: {formatted_time}\n")
                    f.write("\n")
                # Response information
                f.write("== 响应信息 ==\n")
                f.write(f"锚点消息ID: {cycle_info.response_info['anchor_message_id']}\n")
                if cycle_info.response_info["response_text"]:
                    f.write("回复文本:\n")
                    for i, text in enumerate(cycle_info.response_info["response_text"]):
                        f.write(f"  [{i + 1}] {text}\n")
                if cycle_info.response_info["emoji_info"]:
                    f.write(f"表情信息: {cycle_info.response_info['emoji_info']}\n")
                if cycle_info.response_info["reply_message_ids"]:
                    f.write(f"回复消息ID: {', '.join(cycle_info.response_info['reply_message_ids'])}\n")
                f.write("\n")
                # SubMind information
                f.write("== SubMind信息 ==\n")
                f.write(f"结构化信息:\n{cycle_info.submind_info['structured_info']}\n\n")
                f.write(f"思考结果:\n{cycle_info.submind_info['result']}\n\n")
                f.write("SubMind Prompt:\n")
                f.write(f"{cycle_info.submind_info['prompt']}\n\n")
                # ToolUse information
                f.write("== 工具使用信息 ==\n")
                if cycle_info.tooluse_info["tools_used"]:
                    f.write(f"使用的工具: {', '.join(cycle_info.tooluse_info['tools_used'])}\n")
                else:
                    f.write("未使用工具\n")
                if cycle_info.tooluse_info["tool_results"]:
                    f.write("工具结果:\n")
                    for i, result in enumerate(cycle_info.tooluse_info["tool_results"]):
                        f.write(f"  [{i + 1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n")
                    f.write("\n")
                f.write("工具执行 Prompt:\n")
                f.write(f"{cycle_info.tooluse_info['prompt']}\n\n")
                # Planner information
                f.write("== Planner信息 ==\n")
                f.write("Planner Prompt:\n")
                f.write(f"{cycle_info.planner_info['prompt']}\n\n")
                f.write("原始回复:\n")
                f.write(f"{cycle_info.planner_info['response']}\n\n")
                f.write("解析结果:\n")
                f.write(f"{json.dumps(cycle_info.planner_info['parsed_result'], ensure_ascii=False, indent=2)}\n")
            return filepath
        except Exception as e:
            print(f"保存CycleInfo到文件时出错: {e}")
            return ""

    @staticmethod
    def load_from_file(filepath: str) -> Optional[Dict[str, Any]]:
        """
        Load CycleInfo data back from a debug file.

        Only the trailing JSON blob after the "解析结果:" marker is recovered;
        the human-readable text above it is not parsed.

        Args:
            filepath: path of the debug file

        Returns:
            Optional[Dict[str, Any]]: the parsed JSON data, or None on failure
        """
        try:
            if not os.path.exists(filepath):
                print(f"文件不存在: {filepath}")
                return None
            # Read the whole file and scan for the JSON marker
            with open(filepath, "r", encoding="utf-8") as f:
                lines = f.readlines()
            # Find the JSON data after the "解析结果:" marker line
            for i, line in enumerate(lines):
                if "解析结果:" in line and i + 1 < len(lines):
                    # Accumulate following lines until they parse as JSON
                    json_data = ""
                    for j in range(i + 1, len(lines)):
                        json_data += lines[j]
                        try:
                            return json.loads(json_data)
                        except json.JSONDecodeError:
                            continue
            # No JSON data found
            return None
        except Exception as e:
            print(f"从文件加载CycleInfo时出错: {e}")
            return None

    @staticmethod
    def list_cycles(stream_id: str, base_dir: str = "log_debug") -> List[str]:
        """
        List all cycle debug files for the given stream_id.

        Args:
            stream_id: chat stream ID
            base_dir: base directory, defaults to "log_debug"

        Returns:
            List[str]: sorted list of file paths (empty on error or missing dir)
        """
        try:
            stream_dir = os.path.join(base_dir, stream_id)
            if not os.path.exists(stream_dir):
                return []
            files = [
                os.path.join(stream_dir, f)
                for f in os.listdir(stream_dir)
                if f.startswith("cycle_") and f.endswith(".txt")
            ]
            return sorted(files)
        except Exception as e:
            print(f"列出循环文件时出错: {e}")
            return []

View File

@@ -0,0 +1,981 @@
import asyncio
import contextlib
import json # <--- 确保导入 json
import random # <--- 添加导入
import time
import traceback
from collections import deque
from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import chat_manager
from rich.traceback import install
from src.common.logger_manager import get_logger
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.timer_calculator import Timer
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.focus_chat.info.cycle_info import CycleInfo
from src.chat.focus_chat.info.mind_info import MindInfo
from src.chat.focus_chat.info.structured_info import StructuredInfo
from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor
from src.chat.focus_chat.info_processors.mind_processor import MindProcessor
from src.chat.heart_flow.observation.memory_observation import MemoryObservation
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.heart_flow.observation.working_observation import WorkingObservation
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.focus_chat.hfc_utils import create_empty_anchor_message, parse_thinking_id_to_timestamp
from src.chat.focus_chat.memory_activator import MemoryActivator
install(extra_lines=3)
WAITING_TIME_THRESHOLD = 300  # Timeout while waiting for new messages, in seconds
EMOJI_SEND_PRO = 0.3  # Probability of actually sending an emoji the LLM suggested (~30%)
CONSECUTIVE_NO_REPLY_THRESHOLD = 3  # Threshold of consecutive no-reply cycles before the callback fires
logger = get_logger("hfc")  # Logger Name Changed
# Default action definitions: action name -> description shown to the planner LLM
DEFAULT_ACTIONS = {"no_reply": "不操作,继续浏览", "reply": "表达想法,可以只包含文本、表情或两者都有"}
class ActionManager:
    """Action manager: controls the set of actions available to each planning decision."""

    def __init__(self):
        # Start from a copy of the default action set.
        self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy()
        self._original_actions_backup: Optional[Dict[str, str]] = None

    def get_available_actions(self) -> Dict[str, str]:
        """Return the currently available actions (a copy, so callers cannot mutate our state)."""
        return dict(self._available_actions)

    def add_action(self, action_name: str, description: str) -> bool:
        """Register a new action.

        Args:
            action_name: name of the action
            description: human-readable description

        Returns:
            bool: True if added, False when the name already exists.
        """
        if action_name not in self._available_actions:
            self._available_actions[action_name] = description
            return True
        return False

    def remove_action(self, action_name: str) -> bool:
        """Remove an action by name.

        Args:
            action_name: name of the action

        Returns:
            bool: True if removed, False when the name is absent.
        """
        if action_name in self._available_actions:
            del self._available_actions[action_name]
            return True
        return False

    def temporarily_remove_actions(self, actions_to_remove: List[str]):
        """Temporarily remove the given actions, backing up the original set.

        An existing backup is left untouched, so repeated calls do not
        overwrite the snapshot taken by the first one.
        """
        if self._original_actions_backup is None:
            self._original_actions_backup = dict(self._available_actions)
        for name in actions_to_remove:
            # Missing names are ignored silently, mirroring the original check.
            self._available_actions.pop(name, None)

    def restore_actions(self):
        """Restore the action set saved by temporarily_remove_actions(), if any."""
        if self._original_actions_backup is not None:
            self._available_actions = dict(self._original_actions_backup)
            self._original_actions_backup = None
async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str):
"""处理循环延迟"""
cycle_duration = time.monotonic() - cycle_start_time
try:
sleep_duration = 0.0
if not action_taken_this_cycle and cycle_duration < 1:
sleep_duration = 1 - cycle_duration
elif cycle_duration < 0.2:
sleep_duration = 0.2
if sleep_duration > 0:
await asyncio.sleep(sleep_duration)
except asyncio.CancelledError:
logger.info(f"{log_prefix} Sleep interrupted, loop likely cancelling.")
raise
class HeartFChatting:
"""
管理一个连续的Plan-Replier-Sender循环
用于在特定聊天流中生成回复。
其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。
"""
def __init__(
self,
chat_id: str,
observations: list[Observation],
on_consecutive_no_reply_callback: Callable[[], Coroutine[None, None, None]],
):
"""
HeartFChatting 初始化函数
参数:
chat_id: 聊天流唯一标识符(如stream_id)
observations: 关联的观察列表
on_consecutive_no_reply_callback: 连续不回复达到阈值时调用的异步回调函数
"""
# 基础属性
self.stream_id: str = chat_id # 聊天流ID
self.chat_stream: Optional[ChatStream] = None # 关联的聊天流
self.observations: List[Observation] = observations # 关联的观察列表,用于监控聊天流状态
self.on_consecutive_no_reply_callback = on_consecutive_no_reply_callback
self.chatting_info_processor = ChattingInfoProcessor()
self.mind_processor = MindProcessor(subheartflow_id=self.stream_id)
self.memory_observation = MemoryObservation(observe_id=self.stream_id)
self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id)
self.tool_processor = ToolProcessor(subheartflow_id=self.stream_id)
self.working_observation = WorkingObservation(observe_id=self.stream_id)
self.memory_activator = MemoryActivator()
# 日志前缀
self.log_prefix: str = str(chat_id) # Initial default, will be updated
# --- Initialize attributes (defaults) ---
self.is_group_chat: bool = False
self.chat_target_info: Optional[dict] = None
# --- End Initialization ---
self.expressor = DefaultExpressor(chat_id=self.stream_id)
# 动作管理器
self.action_manager = ActionManager()
# 初始化状态控制
self._initialized = False
self._processing_lock = asyncio.Lock()
# LLM规划器配置
self.planner_llm = LLMRequest(
model=global_config.llm_plan,
max_tokens=1000,
request_type="action_planning", # 用于动作规划
)
# 循环控制内部状态
self._loop_active: bool = False # 循环是否正在运行
self._loop_task: Optional[asyncio.Task] = None # 主循环任务
# 添加循环信息管理相关的属性
self._cycle_counter = 0
self._cycle_history: Deque[CycleDetail] = deque(maxlen=10) # 保留最近10个循环的信息
self._current_cycle: Optional[CycleDetail] = None
self.total_no_reply_count: int = 0 # <--- 新增:连续不回复计数器
self._shutting_down: bool = False # <--- 新增:关闭标志位
self.total_waiting_time: float = 0.0 # <--- 新增:累计等待时间
async def _initialize(self) -> bool:
"""
执行懒初始化操作
功能:
1. 获取聊天类型(群聊/私聊)和目标信息
2. 获取聊天流对象
3. 设置日志前缀
返回:
bool: 初始化是否成功
注意:
- 如果已经初始化过会直接返回True
- 需要获取chat_stream对象才能继续后续操作
"""
# 如果已经初始化过,直接返回成功
if self._initialized:
return True
try:
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id)
await self.expressor.initialize()
self.chat_stream = await asyncio.to_thread(chat_manager.get_stream, self.stream_id)
self.expressor.chat_stream = self.chat_stream
self.log_prefix = f"[{chat_manager.get_stream_name(self.stream_id) or self.stream_id}]"
except Exception as e:
logger.error(f"[HFC:{self.stream_id}] 初始化HFC时发生错误: {e}")
return False
# 标记初始化完成
self._initialized = True
logger.debug(f"{self.log_prefix} 初始化完成,准备开始处理消息")
return True
async def start(self):
"""
启动 HeartFChatting 的主循环。
注意:调用此方法前必须确保已经成功初始化。
"""
logger.info(f"{self.log_prefix} 开始认真水群(HFC)...")
await self._start_loop_if_needed()
async def _start_loop_if_needed(self):
"""检查是否需要启动主循环,如果未激活则启动。"""
# 如果循环已经激活,直接返回
if self._loop_active:
return
# 标记为活动状态,防止重复启动
self._loop_active = True
# 检查是否已有任务在运行(理论上不应该,因为 _loop_active=False
if self._loop_task and not self._loop_task.done():
logger.warning(f"{self.log_prefix} 发现之前的循环任务仍在运行(不符合预期)。取消旧任务。")
self._loop_task.cancel()
try:
# 等待旧任务确实被取消
await asyncio.wait_for(self._loop_task, timeout=0.5)
except (asyncio.CancelledError, asyncio.TimeoutError):
pass # 忽略取消或超时错误
self._loop_task = None # 清理旧任务引用
logger.debug(f"{self.log_prefix} 启动认真水群(HFC)主循环...")
# 创建新的循环任务
self._loop_task = asyncio.create_task(self._hfc_loop())
# 添加完成回调
self._loop_task.add_done_callback(self._handle_loop_completion)
def _handle_loop_completion(self, task: asyncio.Task):
"""当 _hfc_loop 任务完成时执行的回调。"""
try:
exception = task.exception()
if exception:
logger.error(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(异常): {exception}")
logger.error(traceback.format_exc()) # Log full traceback for exceptions
else:
# Loop completing normally now means it was cancelled/shutdown externally
logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天 (外部停止)")
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(任务取消)")
finally:
self._loop_active = False
self._loop_task = None
if self._processing_lock.locked():
logger.warning(f"{self.log_prefix} HeartFChatting: 处理锁在循环结束时仍被锁定,强制释放。")
self._processing_lock.release()
async def _hfc_loop(self):
"""主循环,持续进行计划并可能回复消息,直到被外部取消。"""
try:
while True: # 主循环
logger.debug(f"{self.log_prefix} 开始第{self._cycle_counter}次循环")
# --- 在循环开始处检查关闭标志 ---
if self._shutting_down:
logger.info(f"{self.log_prefix} 检测到关闭标志,退出 HFC 循环。")
break
# --------------------------------
# 创建新的循环信息
self._cycle_counter += 1
self._current_cycle = CycleDetail(self._cycle_counter)
# 初始化周期状态
cycle_timers = {}
loop_cycle_start_time = time.monotonic()
# 执行规划和处理阶段
async with self._get_cycle_context() as acquired_lock:
if not acquired_lock:
# 如果未能获取锁(理论上不太可能,除非 shutdown 过程中释放了但又被抢了?)
# 或者也可以在这里再次检查 self._shutting_down
if self._shutting_down:
break # 再次检查,确保退出
logger.warning(f"{self.log_prefix} 未能获取循环处理锁,跳过本次循环。")
await asyncio.sleep(0.1) # 短暂等待避免空转
continue
# thinking_id 是思考过程的ID用于标记每一轮思考
thinking_id = "tid" + str(round(time.time(), 2))
# 主循环:思考->决策->执行
action_taken = await self._think_plan_execute_loop(cycle_timers, thinking_id)
# 更新循环信息
self._current_cycle.set_thinking_id(thinking_id)
self._current_cycle.timers = cycle_timers
# 防止循环过快消耗资源
await _handle_cycle_delay(action_taken, loop_cycle_start_time, self.log_prefix)
# 完成当前循环并保存历史
self._current_cycle.complete_cycle()
self._cycle_history.append(self._current_cycle)
# 保存CycleInfo到文件
try:
filepath = CycleDetail.save_to_file(self._current_cycle, self.stream_id)
logger.info(f"{self.log_prefix} 已保存循环信息到文件: {filepath}")
except Exception as e:
logger.error(f"{self.log_prefix} 保存循环信息到文件时出错: {e}")
# 记录循环信息和计时器结果
timer_strings = []
for name, elapsed in cycle_timers.items():
formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}"
timer_strings.append(f"{name}: {formatted_time}")
logger.debug(
f"{self.log_prefix} 第 #{self._current_cycle.cycle_id}次思考完成,"
f"耗时: {self._current_cycle.end_time - self._current_cycle.start_time:.2f}秒, "
f"动作: {self._current_cycle.action_type}"
+ (f"\n计时器详情: {'; '.join(timer_strings)}" if timer_strings else "")
)
except asyncio.CancelledError:
# 设置了关闭标志位后被取消是正常流程
if not self._shutting_down:
logger.warning(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环意外被取消")
else:
logger.info(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环已取消 (正常关闭)")
except Exception as e:
logger.error(f"{self.log_prefix} HeartFChatting: 意外错误: {e}")
logger.error(traceback.format_exc())
@contextlib.asynccontextmanager
async def _get_cycle_context(self):
"""
循环周期的上下文管理器
用于确保资源的正确获取和释放:
1. 获取处理锁
2. 执行操作
3. 释放锁
"""
acquired = False
try:
await self._processing_lock.acquire()
acquired = True
yield acquired
finally:
if acquired and self._processing_lock.locked():
self._processing_lock.release()
async def _think_plan_execute_loop(self, cycle_timers: dict, thinking_id: str) -> tuple[bool, str]:
try:
with Timer("观察", cycle_timers):
await self.observations[0].observe()
await self.memory_observation.observe()
await self.working_observation.observe()
await self.hfcloop_observation.observe()
observations: List[Observation] = []
observations.append(self.observations[0])
observations.append(self.memory_observation)
observations.append(self.working_observation)
observations.append(self.hfcloop_observation)
for observation in observations:
logger.debug(f"{self.log_prefix} 观察信息: {observation}")
with Timer("回忆", cycle_timers):
running_memorys = await self.memory_activator.activate_memory(observations)
# 记录并行任务开始时间
parallel_start_time = time.time()
logger.debug(f"{self.log_prefix} 开始信息处理器并行任务")
# 并行执行两个任务:思考和工具执行
with Timer("执行 信息处理器", cycle_timers):
# 1. 子思维思考 - 不执行工具调用
think_task = asyncio.create_task(
self.mind_processor.process_info(observations=observations, running_memorys=running_memorys)
)
logger.debug(f"{self.log_prefix} 启动子思维思考任务")
# 2. 工具执行器 - 专门处理工具调用
tool_task = asyncio.create_task(
self.tool_processor.process_info(observations=observations, running_memorys=running_memorys)
)
logger.debug(f"{self.log_prefix} 启动工具执行任务")
# 3. 聊天信息处理器
chatting_info_task = asyncio.create_task(
self.chatting_info_processor.process_info(
observations=observations, running_memorys=running_memorys
)
)
logger.debug(f"{self.log_prefix} 启动聊天信息处理器任务")
# 创建任务完成状态追踪
tasks = {"思考任务": think_task, "工具任务": tool_task, "聊天信息处理任务": chatting_info_task}
pending = set(tasks.values())
# 等待所有任务完成,同时追踪每个任务的完成情况
results: dict[str, list[InfoBase]] = {}
while pending:
# 等待任务完成
done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0)
# 记录完成的任务
for task in done:
for name, t in tasks.items():
if task == t:
task_end_time = time.time()
task_duration = task_end_time - parallel_start_time
logger.info(f"{self.log_prefix} {name}已完成,耗时: {task_duration:.2f}")
results[name] = task.result()
break
# 如果仍有未完成任务,记录进行中状态
if pending:
current_time = time.time()
elapsed = current_time - parallel_start_time
pending_names = [name for name, t in tasks.items() if t in pending]
logger.info(
f"{self.log_prefix} 并行处理已进行{elapsed:.2f}秒,待完成任务: {', '.join(pending_names)}"
)
# 所有任务完成,从结果中提取数据
mind_processed_infos = results.get("思考任务", [])
tool_processed_infos = results.get("工具任务", [])
chatting_info_processed_infos = results.get("聊天信息处理任务", [])
# 记录总耗时
parallel_end_time = time.time()
total_duration = parallel_end_time - parallel_start_time
logger.info(f"{self.log_prefix} 思考和工具并行任务全部完成,总耗时: {total_duration:.2f}")
all_plan_info = mind_processed_infos + tool_processed_infos + chatting_info_processed_infos
logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}")
# 串行执行规划器 - 使用刚获取的思考结果
logger.debug(f"{self.log_prefix} 开始 规划器")
with Timer("规划器", cycle_timers):
planner_result = await self._planner(all_plan_info, cycle_timers)
action = planner_result.get("action", "error")
action_data = planner_result.get("action_data", {}) # 新增获取动作数据
reasoning = planner_result.get("reasoning", "未提供理由")
logger.debug(f"{self.log_prefix} 动作和动作信息: {action}, {action_data}, {reasoning}")
# 更新循环信息
self._current_cycle.set_action_info(
action_type=action,
action_data=action_data,
reasoning=reasoning,
action_taken=True,
)
# 处理LLM错误
if planner_result.get("llm_error"):
logger.error(f"{self.log_prefix} LLM失败: {reasoning}")
return False, ""
# 在此处添加日志记录
if action == "reply":
action_str = "回复"
elif action == "no_reply":
action_str = "不回复"
else:
action_str = "位置动作"
logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'")
self.hfcloop_observation.add_loop_info(self._current_cycle)
return await self._handle_action(action, reasoning, action_data, cycle_timers, thinking_id)
except Exception as e:
logger.error(f"{self.log_prefix} 并行+串行处理失败: {e}")
logger.error(traceback.format_exc())
return False, ""
async def _handle_action(
self,
action: str,
reasoning: str,
action_data: dict,
cycle_timers: dict,
thinking_id: str,
) -> tuple[bool, str]:
"""
处理规划动作
参数:
action: 动作类型
reasoning: 决策理由
action_data: 动作数据,包含不同动作需要的参数
cycle_timers: 计时器字典
planner_start_db_time: 规划开始时间
返回:
tuple[bool, str]: (是否执行了动作, 思考消息ID)
"""
action_handlers = {
"reply": self._handle_reply,
"no_reply": self._handle_no_reply,
}
handler = action_handlers.get(action)
if not handler:
logger.warning(f"{self.log_prefix} 未知动作: {action}, 原因: {reasoning}")
return False, ""
try:
if action == "reply":
return await handler(reasoning, action_data, cycle_timers, thinking_id)
else: # no_reply
return await handler(reasoning, cycle_timers, thinking_id)
except Exception as e:
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
traceback.print_exc()
return False, ""
async def _handle_no_reply(self, reasoning: str, cycle_timers: dict, thinking_id: str) -> bool:
"""
处理不回复的情况
工作流程:
1. 等待新消息、超时或关闭信号
2. 根据等待结果更新连续不回复计数
3. 如果达到阈值,触发回调
参数:
reasoning: 不回复的原因
planner_start_db_time: 规划开始时间
cycle_timers: 计时器字典
返回:
bool: 是否成功处理
"""
logger.info(f"{self.log_prefix} 决定不回复: {reasoning}")
observation = self.observations[0] if self.observations else None
try:
with Timer("等待新消息", cycle_timers):
# 等待新消息、超时或关闭信号,并获取结果
await self._wait_for_new_message(observation, thinking_id, self.log_prefix)
# 从计时器获取实际等待时间
current_waiting = cycle_timers.get("等待新消息", 0.0)
if not self._shutting_down:
self.total_no_reply_count += 1
self.total_waiting_time += current_waiting # 累加等待时间
logger.debug(
f"{self.log_prefix} 连续不回复计数增加: {self.total_no_reply_count}/{CONSECUTIVE_NO_REPLY_THRESHOLD}, "
f"本次等待: {current_waiting:.2f}秒, 累计等待: {self.total_waiting_time:.2f}"
)
# 检查是否同时达到次数和时间阈值
time_threshold = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD
if (
self.total_no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD
and self.total_waiting_time >= time_threshold
):
logger.info(
f"{self.log_prefix} 连续不回复达到阈值 ({self.total_no_reply_count}次) "
f"且累计等待时间达到 {self.total_waiting_time:.2f}秒 (阈值 {time_threshold}秒)"
f"调用回调请求状态转换"
)
# 调用回调。注意:这里不重置计数器和时间,依赖回调函数成功改变状态来隐式重置上下文。
await self.on_consecutive_no_reply_callback()
elif self.total_no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD:
# 仅次数达到阈值,但时间未达到
logger.debug(
f"{self.log_prefix} 连续不回复次数达到阈值 ({self.total_no_reply_count}次) "
f"但累计等待时间 {self.total_waiting_time:.2f}秒 未达到时间阈值 ({time_threshold}秒),暂不调用回调"
)
# else: 次数和时间都未达到阈值,不做处理
return True, thinking_id
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} 处理 'no_reply' 时等待被中断 (CancelledError)")
raise
except Exception as e: # 捕获调用管理器或其他地方可能发生的错误
logger.error(f"{self.log_prefix} 处理 'no_reply' 时发生错误: {e}")
logger.error(traceback.format_exc())
return False, thinking_id
async def _wait_for_new_message(self, observation: ChattingObservation, thinking_id: str, log_prefix: str) -> bool:
"""
等待新消息 或 检测到关闭信号
参数:
observation: 观察实例
planner_start_db_time: 开始等待的时间
log_prefix: 日志前缀
返回:
bool: 是否检测到新消息 (如果因关闭信号退出则返回 False)
"""
wait_start_time = time.monotonic()
while True:
# --- 在每次循环开始时检查关闭标志 ---
if self._shutting_down:
logger.info(f"{log_prefix} 等待新消息时检测到关闭信号,中断等待。")
return False # 表示因为关闭而退出
# -----------------------------------
thinking_id_timestamp = parse_thinking_id_to_timestamp(thinking_id)
# 检查新消息
if await observation.has_new_messages_since(thinking_id_timestamp):
logger.info(f"{log_prefix} 检测到新消息")
return True
# 检查超时 (放在检查新消息和关闭之后)
if time.monotonic() - wait_start_time > WAITING_TIME_THRESHOLD:
logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)")
return False
try:
# 短暂休眠,让其他任务有机会运行,并能更快响应取消或关闭
await asyncio.sleep(0.5) # 缩短休眠时间
except asyncio.CancelledError:
# 如果在休眠时被取消,再次检查关闭标志
# 如果是正常关闭,则不需要警告
if not self._shutting_down:
logger.warning(f"{log_prefix} _wait_for_new_message 的休眠被意外取消")
# 无论如何,重新抛出异常,让上层处理
raise
async def shutdown(self):
"""优雅关闭HeartFChatting实例取消活动循环任务"""
logger.info(f"{self.log_prefix} 正在关闭HeartFChatting...")
self._shutting_down = True # <-- 在开始关闭时设置标志位
# 取消循环任务
if self._loop_task and not self._loop_task.done():
logger.info(f"{self.log_prefix} 正在取消HeartFChatting循环任务")
self._loop_task.cancel()
try:
await asyncio.wait_for(self._loop_task, timeout=1.0)
logger.info(f"{self.log_prefix} HeartFChatting循环任务已取消")
except (asyncio.CancelledError, asyncio.TimeoutError):
pass
except Exception as e:
logger.error(f"{self.log_prefix} 取消循环任务出错: {e}")
else:
logger.info(f"{self.log_prefix} 没有活动的HeartFChatting循环任务")
# 清理状态
self._loop_active = False
self._loop_task = None
if self._processing_lock.locked():
self._processing_lock.release()
logger.warning(f"{self.log_prefix} 已释放处理锁")
logger.info(f"{self.log_prefix} HeartFChatting关闭完成")
def get_cycle_history(self, last_n: Optional[int] = None) -> List[Dict[str, Any]]:
"""获取循环历史记录
参数:
last_n: 获取最近n个循环的信息如果为None则获取所有历史记录
返回:
List[Dict[str, Any]]: 循环历史记录列表
"""
history = list(self._cycle_history)
if last_n is not None:
history = history[-last_n:]
return [cycle.to_dict() for cycle in history]
async def _planner(self, all_plan_info: List[InfoBase], cycle_timers: dict) -> Dict[str, Any]:
"""
规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。
重构为让LLM返回结构化JSON文本然后在代码中解析。
参数:
current_mind: 子思维的当前思考结果
cycle_timers: 计时器字典
is_re_planned: 是否为重新规划 (此重构中暂时简化,不处理 is_re_planned 的特殊逻辑)
"""
logger.info(f"{self.log_prefix}开始 规划")
actions_to_remove_temporarily = []
# --- 检查历史动作并决定临时移除动作 (逻辑保持不变) ---
lian_xu_wen_ben_hui_fu = 0
probability_roll = random.random()
for cycle in reversed(self._cycle_history):
if cycle.action_taken:
if cycle.action_type == "text_reply":
lian_xu_wen_ben_hui_fu += 1
else:
break
if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + (
len(self._cycle_history) - 4
):
break
logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}")
if lian_xu_wen_ben_hui_fu >= 3:
logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply")
actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"])
elif lian_xu_wen_ben_hui_fu == 2:
if probability_roll < 0.8:
logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次80% 概率移除 text_reply 和 emoji_reply (触发)")
actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"])
else:
logger.info(
f"{self.log_prefix}[Planner] 连续回复 2 次80% 概率移除 text_reply 和 emoji_reply (未触发)"
)
elif lian_xu_wen_ben_hui_fu == 1:
if probability_roll < 0.4:
logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次40% 概率移除 text_reply (触发)")
actions_to_remove_temporarily.append("text_reply")
else:
logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次40% 概率移除 text_reply (未触发)")
# --- 结束检查历史动作 ---
# 获取观察信息
for info in all_plan_info:
if isinstance(info, ObsInfo):
logger.debug(f"{self.log_prefix} 观察信息: {info}")
observed_messages = info.get_talking_message()
observed_messages_str = info.get_talking_message_str_truncate()
chat_type = info.get_chat_type()
if chat_type == "group":
is_group_chat = True
else:
is_group_chat = False
elif isinstance(info, MindInfo):
logger.debug(f"{self.log_prefix} 思维信息: {info}")
current_mind = info.get_current_mind()
elif isinstance(info, CycleInfo):
logger.debug(f"{self.log_prefix} 循环信息: {info}")
cycle_info = info.get_observe_info()
elif isinstance(info, StructuredInfo):
logger.debug(f"{self.log_prefix} 结构化信息: {info}")
structured_info = info.get_data()
# --- 使用 LLM 进行决策 (JSON 输出模式) --- #
action = "no_reply" # 默认动作
reasoning = "规划器初始化默认"
llm_error = False # LLM 请求或解析错误标志
# 获取我们将传递给 prompt 构建器和用于验证的当前可用动作
current_available_actions = self.action_manager.get_available_actions()
try:
# --- 应用临时动作移除 ---
if actions_to_remove_temporarily:
self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily)
# 更新 current_available_actions 以反映移除后的状态
current_available_actions = self.action_manager.get_available_actions()
logger.debug(
f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(current_available_actions.keys())}"
)
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
prompt = await prompt_builder.build_planner_prompt(
is_group_chat=is_group_chat, # <-- Pass HFC state
chat_target_info=None,
observed_messages_str=observed_messages_str, # <-- Pass local variable
current_mind=current_mind, # <-- Pass argument
structured_info=structured_info, # <-- Pass SubMind info
current_available_actions=current_available_actions, # <-- Pass determined actions
cycle_info=cycle_info, # <-- Pass cycle info
)
# --- 调用 LLM (普通文本生成) ---
llm_content = None
try:
llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt)
logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}")
except Exception as req_e:
logger.error(f"{self.log_prefix}[Planner] LLM 请求执行失败: {req_e}")
reasoning = f"LLM 请求失败: {req_e}"
llm_error = True
# 直接使用默认动作返回错误结果
action = "no_reply" # 明确设置为默认值
# --- 解析 LLM 返回的 JSON (仅当 LLM 请求未出错时进行) ---
if not llm_error and llm_content:
try:
# 尝试去除可能的 markdown 代码块标记
cleaned_content = (
llm_content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip()
)
if not cleaned_content:
raise json.JSONDecodeError("Cleaned content is empty", cleaned_content, 0)
parsed_json = json.loads(cleaned_content)
# 提取决策,提供默认值
extracted_action = parsed_json.get("action", "no_reply")
extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由")
# extracted_emoji_query = parsed_json.get("emoji_query", "")
# 新的reply格式
if extracted_action == "reply":
action_data = {
"text": parsed_json.get("text", []),
"emojis": parsed_json.get("emojis", []),
"target": parsed_json.get("target", ""),
}
else:
action_data = {} # 其他动作可能不需要额外数据
# 验证动作是否在当前可用列表中
# !! 使用调用 prompt 时实际可用的动作列表进行验证
if extracted_action not in current_available_actions:
logger.warning(
f"{self.log_prefix}[Planner] LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
)
action = "no_reply"
reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}"
# 检查 no_reply 是否也恰好被移除了 (极端情况)
if "no_reply" not in current_available_actions:
logger.error(
f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。"
)
action = "error" # 回退到错误状态
reasoning = "无法执行任何有效动作,包括 no_reply"
llm_error = True # 标记为严重错误
else:
llm_error = False # 视为逻辑修正而非 LLM 错误
else:
# 动作有效且可用
action = extracted_action
reasoning = extracted_reasoning
llm_error = False # 解析成功
logger.debug(
f"{self.log_prefix}[要做什么]\nPrompt:\n{prompt}\n\n决策结果 (来自JSON): {action}, 理由: {reasoning}"
)
logger.debug(f"{self.log_prefix}动作信息: '{action_data}'")
except Exception as json_e:
logger.warning(
f"{self.log_prefix}[Planner] 解析LLM响应JSON失败: {json_e}. LLM原始输出: '{llm_content}'"
)
reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'."
action = "no_reply" # 解析失败则默认不回复
llm_error = True # 标记解析错误
elif not llm_error and not llm_content:
# LLM 请求成功但返回空内容
logger.warning(f"{self.log_prefix}[Planner] LLM 返回了空内容。")
reasoning = "LLM 返回了空内容,使用默认动作 'no_reply'."
action = "no_reply"
llm_error = True # 标记为空响应错误
except Exception as outer_e:
logger.error(f"{self.log_prefix}[Planner] Planner 处理过程中发生意外错误: {outer_e}")
traceback.print_exc()
action = "error" # 发生未知错误,标记为 error 动作
reasoning = f"Planner 内部处理错误: {outer_e}"
llm_error = True
finally:
# --- 确保动作恢复 ---
if self.action_manager._original_actions_backup is not None:
self.action_manager.restore_actions()
logger.debug(
f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}"
)
# --- 概率性忽略文本回复附带的表情 (逻辑保持不变) ---
emoji = action_data.get("emojis")
if action == "reply" and emoji:
logger.debug(f"{self.log_prefix}[Planner] 大模型建议文字回复带表情: '{emoji}'")
if random.random() > EMOJI_SEND_PRO:
logger.info(f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji}'")
action_data["emojis"] = "" # 清空表情请求
else:
logger.info(f"{self.log_prefix}好吧,加上表情 '{emoji}'")
# --- 结束概率性忽略 ---
# 返回结果字典
return {
"action": action,
"action_data": action_data,
"reasoning": reasoning,
"current_mind": current_mind,
"observed_messages": observed_messages,
"llm_error": llm_error, # 返回错误状态
}
async def _handle_reply(
    self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str
) -> tuple[bool, str]:
    """Handle the unified "reply" action (text and/or emoji, any order).

    Expected ``reply_data`` shape (all fields optional per the planner prompt):
    {
        "text": "你好啊",      # reply text
        "target": "锚定消息",  # raw text of the message to anchor the reply to
        "emojis": "微笑"       # emoji keyword / usage scene
    }

    Args:
        reasoning: The planner's stated reason for replying.
        reply_data: Action payload as produced by the planner (see above).
        cycle_timers: Dict used to collect per-phase timing data.
        thinking_id: Identifier of the thinking message for this cycle.

    Returns:
        tuple[bool, str]: (send success flag, concatenated reply content).
    """
    # A real reply resets the consecutive no-reply counters.
    self.total_no_reply_count = 0
    self.total_waiting_time = 0.0

    # Look up the anchor message by the planner-provided target text.
    # "target" is optional in the planner JSON, so use .get() to avoid
    # a KeyError when the LLM omits it.
    observations: ChattingObservation = self.observations[0]
    anchor_message = observations.serch_message_by_text(reply_data.get("target", ""))

    # No anchor found: fall back to a synthetic placeholder anchor.
    if not anchor_message:
        logger.info(f"{self.log_prefix} 未找到锚点消息,创建占位符")
        anchor_message = await create_empty_anchor_message(
            self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream
        )
    else:
        anchor_message.update_chat_stream(self.chat_stream)

    success, reply_set = await self.expressor.deal_reply(
        cycle_timers=cycle_timers,
        action_data=reply_data,
        anchor_message=anchor_message,
        reasoning=reasoning,
        thinking_id=thinking_id,
    )

    # Collect the sent content; text and emoji entries are recorded alike.
    # (Avoids shadowing the builtin `type` and the duplicated if/elif
    # branches of the previous implementation.)
    reply_text = "".join(
        content for reply_type, content in reply_set if reply_type in ("text", "emoji")
    )

    self._current_cycle.set_response_info(
        response_text=reply_text,
    )
    return success, reply_text

View File

@@ -1,15 +1,13 @@
# src/plugins/heartFC_chat/heartFC_sender.py
import asyncio # 重新导入 asyncio
import asyncio
from typing import Dict, Optional # 重新导入类型
from ..chat.message import MessageSending, MessageThinking # 只保留 MessageSending 和 MessageThinking
# from ..message import global_api
from src.plugins.message.api import global_api
from ..storage.storage import MessageStorage
from ..chat.utils import truncate_message
from src.chat.message_receive.message import MessageSending, MessageThinking
from src.common.message.api import global_api
from src.chat.message_receive.storage import MessageStorage
from src.chat.utils.utils import truncate_message
from src.common.logger_manager import get_logger
from src.plugins.chat.utils import calculate_typing_time
from src.chat.utils.utils import calculate_typing_time
from rich.traceback import install
import traceback
install(extra_lines=3)
@@ -19,17 +17,16 @@ logger = get_logger("sender")
async def send_message(message: MessageSending) -> None:
"""合并后的消息发送函数包含WS发送和日志记录"""
message_preview = truncate_message(message.processed_plain_text)
message_preview = truncate_message(message.processed_plain_text, max_length=40)
try:
# 直接调用API发送消息
await global_api.send_message(message)
logger.success(f"发送消息 '{message_preview}' 成功")
logger.success(f"已将消息 '{message_preview}' 发往平台'{message.message_info.platform}'")
except Exception as e:
logger.error(f"发送消息 '{message_preview}' 失败: {str(e)}")
if not message.message_info.platform:
raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置请检查配置文件") from e
logger.error(f"发送消息 '{message_preview}' 发往平台'{message.message_info.platform}' 失败: {str(e)}")
traceback.print_exc()
raise e # 重新抛出其他异常
@@ -69,21 +66,24 @@ class HeartFCSender:
del self.thinking_messages[chat_id]
logger.debug(f"[{chat_id}] Removed empty thinking message container.")
def is_thinking(self, chat_id: str, message_id: str) -> bool:
"""检查指定的消息 ID 是否当前正处于思考状态。"""
return chat_id in self.thinking_messages and message_id in self.thinking_messages[chat_id]
async def get_thinking_start_time(self, chat_id: str, message_id: str) -> Optional[float]:
"""获取已注册思考消息的开始时间。"""
async with self._thinking_lock:
thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id)
return thinking_message.thinking_start_time if thinking_message else None
async def type_and_send_message(self, message: MessageSending, typing=False):
async def send_message(self, message: MessageSending, has_thinking=False, typing=False):
"""
立即处理发送并存储单个 MessageSending 消息
调用此方法前应先调用 register_thinking 注册对应的思考消息
此方法执行后会调用 complete_thinking 清理思考状态
处理发送并存储一条消息
参数
message: MessageSending 对象待发送的消息
has_thinking: 是否管理思考状态表情包无思考状态如需调用 register_thinking/complete_thinking
typing: 是否模拟打字等待根据 has_thinking 控制等待时长
用法
- has_thinking=True 自动处理思考消息的时间和清理
- typing=True 发送前会有打字等待
"""
if not message.chat_stream:
logger.error("消息缺少 chat_stream无法发送")
@@ -96,23 +96,29 @@ class HeartFCSender:
message_id = message.message_info.message_id
try:
_ = message.update_thinking_time()
if has_thinking:
_ = message.update_thinking_time()
# --- 条件应用 set_reply 逻辑 ---
if message.apply_set_reply_logic and message.is_head and not message.is_private_message():
logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")
message.set_reply()
# --- 结束条件 set_reply ---
# --- 条件应用 set_reply 逻辑 ---
if (
message.is_head
and not message.is_private_message()
and message.reply.processed_plain_text != "[System Trigger Context]"
):
logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")
await message.process()
if typing:
typing_time = calculate_typing_time(
input_string=message.processed_plain_text,
thinking_start_time=message.thinking_start_time,
is_emoji=message.is_emoji,
)
await asyncio.sleep(typing_time)
if has_thinking:
typing_time = calculate_typing_time(
input_string=message.processed_plain_text,
thinking_start_time=message.thinking_start_time,
is_emoji=message.is_emoji,
)
await asyncio.sleep(typing_time)
else:
await asyncio.sleep(0.5)
await send_message(message)
await self.storage.store_message(message, message.chat_stream)
@@ -122,30 +128,3 @@ class HeartFCSender:
raise e
finally:
await self.complete_thinking(chat_id, message_id)
async def send_and_store(self, message: MessageSending):
"""处理、发送并存储单个消息,不涉及思考状态管理。"""
if not message.chat_stream:
logger.error(f"[{message.message_info.platform or 'UnknownPlatform'}] 消息缺少 chat_stream无法发送")
return
if not message.message_info or not message.message_info.message_id:
logger.error(
f"[{message.chat_stream.stream_id if message.chat_stream else 'UnknownStream'}] 消息缺少 message_info 或 message_id无法发送"
)
return
chat_id = message.chat_stream.stream_id
message_id = message.message_info.message_id # 获取消息ID用于日志
try:
await message.process()
await asyncio.sleep(0.5)
await send_message(message) # 使用现有的发送方法
await self.storage.store_message(message, message.chat_stream) # 使用现有的存储方法
except Exception as e:
logger.error(f"[{chat_id}] 处理或存储消息 {message_id} 时出错: {e}")
# 重新抛出异常,让调用者知道失败了
raise e

View File

@@ -2,16 +2,17 @@ import time
import traceback
from ..memory_system.Hippocampus import HippocampusManager
from ...config.config import global_config
from ..chat.message import MessageRecv
from ..storage.storage import MessageStorage
from ..chat.utils import is_mentioned_bot_in_message
from ..message_receive.message import MessageRecv
from ..message_receive.storage import MessageStorage
from ..utils.utils import is_mentioned_bot_in_message
from maim_message import Seg
from src.heart_flow.heartflow import heartflow
from src.chat.heart_flow.heartflow import heartflow
from src.common.logger_manager import get_logger
from ..chat.chat_stream import chat_manager
from ..chat.message_buffer import message_buffer
from ..message_receive.chat_stream import chat_manager
# from ..message_receive.message_buffer import message_buffer
from ..utils.timer_calculator import Timer
from src.plugins.person_info.relationship_manager import relationship_manager
from src.chat.person_info.relationship_manager import relationship_manager
from typing import Optional, Tuple, Dict, Any
logger = get_logger("chat")
@@ -169,7 +170,7 @@ class HeartFCProcessor:
messageinfo = message.message_info
# 2. 消息缓冲与流程序化
await message_buffer.start_caching_messages(message)
# await message_buffer.start_caching_messages(message)
chat = await chat_manager.get_or_create_stream(
platform=messageinfo.platform,
@@ -188,16 +189,16 @@ class HeartFCProcessor:
return
# 4. 缓冲检查
buffer_result = await message_buffer.query_buffer_result(message)
if not buffer_result:
msg_type = _get_message_type(message)
type_messages = {
"text": f"触发缓冲,消息:{message.processed_plain_text}",
"image": "触发缓冲,表情包/图片等待中",
"seglist": "触发缓冲,消息列表等待中",
}
logger.debug(type_messages.get(msg_type, "触发未知类型缓冲"))
return
# buffer_result = await message_buffer.query_buffer_result(message)
# if not buffer_result:
# msg_type = _get_message_type(message)
# type_messages = {
# "text": f"触发缓冲,消息:{message.processed_plain_text}",
# "image": "触发缓冲,表情包/图片等待中",
# "seglist": "触发缓冲,消息列表等待中",
# }
# logger.debug(type_messages.get(msg_type, "触发未知类型缓冲"))
# return
# 5. 消息存储
await self.storage.store_message(message, chat)
@@ -210,12 +211,12 @@ class HeartFCProcessor:
# 7. 日志记录
mes_name = chat.group_info.group_name if chat.group_info else "私聊"
current_time = time.strftime("%H%M%S", time.localtime(message.message_info.time))
current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
logger.info(
f"[{current_time}][{mes_name}]"
f"{userinfo.user_nickname}:"
f"{message.processed_plain_text}"
f"[兴趣度: {interested_rate:.2f}]"
f"[激活: {interested_rate:.1f}]"
)
# 8. 关系处理

View File

@@ -1,21 +1,21 @@
import random
from ...config.config import global_config
from src.config.config import global_config
from src.common.logger_manager import get_logger
from ...individuality.individuality import Individuality
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.plugins.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import get_embedding
from src.individuality.individuality import Individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.utils.utils import get_embedding
import time
from typing import Union, Optional, Deque, Dict, Any
from ...common.database import db
from ..chat.utils import get_recent_group_speaker
from typing import Union, Optional, Dict, Any
from src.common.database import db
from src.chat.utils.utils import get_recent_group_speaker
from src.manager.mood_manager import mood_manager
from ..memory_system.Hippocampus import HippocampusManager
from ..schedule.schedule_generator import bot_schedule
from ..knowledge.knowledge_lib import qa_manager
from src.chat.memory_system.Hippocampus import HippocampusManager
from src.chat.knowledge.knowledge_lib import qa_manager
from src.chat.focus_chat.expressors.exprssion_learner import expression_learner
import traceback
from .heartFC_Cycleinfo import CycleInfo
import random
logger = get_logger("prompt")
@@ -23,20 +23,23 @@ logger = get_logger("prompt")
def init_prompt():
Prompt(
"""
{info_from_tools}
你可以参考以下的语言习惯如果情景合适就使用不要盲目使用,不要生硬使用而是结合到表达中
{style_habbits}
你现在正在群里聊天以下是群里正在进行的聊天内容
{chat_info}
以上是聊天内容你需要了解聊天记录中的内容
{chat_target}
{chat_talking_prompt}
现在你想要在群里发言或者回复\n
你需要扮演一位网名叫{bot_name}的人进行回复这个人的特点是"{prompt_personality}"
你正在{chat_target_2},现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些你可以参考贴吧知乎或者微博的回复风格
看到以上聊天记录你刚刚在想
{current_mind_info}
因为上述想法你决定发言原因是{reason}
回复尽量简短一些请注意把握聊天内容{reply_style2}请一次只回复一个话题不要同时回复多个人{prompt_ger}
{reply_style1}说中文不要刻意突出自身学科背景注意只输出回复内容
{moderation_prompt}注意回复不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )""",
你的名字是{bot_name}{prompt_personality}在这聊天中"{target_message}"引起了你的注意对这句话你想表达{in_mind_reply},原因是{reason}你现在要思考怎么回复
你需要使用合适的语法和句法参考聊天内容组织一条日常且口语化的回复
请你根据情景使用以下句法
{grammar_habbits}
回复尽量简短一些可以参考贴吧知乎和微博的回复风格你可以完全重组回复保留最基本的表达含义就好但注意回复要简短但重组后保持语意通顺
回复不要浮夸不要用夸张修辞平淡一些不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )只输出一条回复就好
现在你说
""",
"heart_flow_prompt",
)
@@ -54,59 +57,58 @@ def init_prompt():
"""你的名字是{bot_name},{prompt_personality}{chat_context_description}。需要基于以下信息决定如何参与对话:
{structured_info_block}
{chat_content_block}
{current_mind_block}
{mind_info_prompt}
{cycle_info_block}
请综合分析聊天内容和你看到的新消息参考内心想法并根据以下原则和可用动作做出决策
回复原则
1. 回复(no_reply)适用
- 话题无关/无聊/不感兴趣
- 最后一条消息是你自己发的且无人回应你
- 讨论你不懂的专业话题
- 你发送了太多消息且无人回复
1. 操作(no_reply)要求
- 话题无关/无聊/不感兴趣/不懂
- 最后一条消息是你自己发的且无人回应你
- 你发送了太多消息且无人回复
2. 文字回复(text_reply)适用
- 有实质性内容需要表达
- 有人提到你但你还没有回应他
- 可以追加emoji_query表达情绪(emoji_query填写表情包的适用场合也就是当前场合)
- 不要追加太多表情
3. 纯表情回复(emoji_reply)适用
- 适合用表情回应的场景
- 需提供明确的emoji_query
4. 自我对话处理
- 如果是自己发的消息想继续需自然衔接
- 避免重复或评价自己的发言
- 不要和自己聊天
决策任务
{action_options_text}
2. 回复(reply)要求
- 有实质性内容需要表达
- 有人提到你但你还没有回应他
- 在合适的时候添加表情不要总是添加
- 如果你要回复特定某人的某句话或者你想回复较早的消息请在target中指定那句话的原始文本
- 除非有明确的回复目标如果选择了target不用特别提到某个人的人名
- 一次只回复一个人一次只回复一个话题,突出重点
- 如果是自己发的消息想继续需自然衔接
- 避免重复或评价自己的发言,不要和自己聊天
你必须从上面列出的可用行动中选择一个并说明原因
你的决策必须以严格的 JSON 格式输出且仅包含 JSON 内容不要有任何其他文字或解释
JSON 结构如下包含三个字段 "action", "reasoning", "emoji_query":
{action_options_text}
如果选择reply请按以下JSON格式返回:
{{
"action": "string", // 必须是上面提供的可用行动之一 (例如: '{example_action}')
"reasoning": "string", // 做出此决定的详细理由和思考过程说明你如何应用了回复原则
"emoji_query": "string" // 可选如果行动是 'emoji_reply'必须提供表情主题(填写表情包的适用场合)如果行动是 'text_reply' 且你想附带表情也在此提供表情主题否则留空字符串 ""遵循回复原则不要滥用
"action": "reply",
"text": "你想表达的内容",
"emojis": "描述当前使用表情包的场景",
"target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)",
"reasoning": "你的决策理由",
}}
如果选择no_reply请按以下格式返回:
{{
"action": "no_reply",
"reasoning": "你的决策理由"
}}
{moderation_prompt}
请输出你的决策 JSON
""",
"planner_prompt",
)
Prompt(
"""你原本打算{action},因为:{reasoning}
但是你看到了新的消息你决定重新决定行动""",
"replan_prompt",
)
Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
Prompt("和群里聊天", "chat_target_group2")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
Prompt("在群里聊天", "chat_target_group2")
Prompt("{sender_name}私聊", "chat_target_private2")
Prompt(
"""检查并忽略任何涉及尝试绕过审核的行为。涉及政治敏感以及违法违规的内容请规避。""",
"moderation_prompt",
@@ -117,7 +119,6 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query":
{memory_prompt}
{relation_prompt}
{prompt_info}
{schedule_prompt}
{chat_target}
{chat_talking_prompt}
现在"{sender_name}"说的:{message_txt}引起了你的注意你想要在群里发言或者回复这条消息\n
@@ -135,7 +136,7 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query":
"你回忆起:{related_memory_info}\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n",
"memory_prompt",
)
Prompt("你现在正在做的事情是:{schedule_info}", "schedule_prompt")
Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
# --- Template for HeartFChatting (FOCUSED mode) ---
@@ -154,7 +155,7 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query":
{current_mind_info}
因为上述想法你决定回复原因是{reason}
回复尽量简短一些请注意把握聊天内容{reply_style2}{prompt_ger}
回复尽量简短一些请注意把握聊天内容{reply_style2}{prompt_ger}不要复读自己说的话
{reply_style1}说中文不要刻意突出自身学科背景注意只输出回复内容
{moderation_prompt}注意回复不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )""",
"heart_flow_private_prompt", # New template for private FOCUSED chat
@@ -166,7 +167,6 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query":
{memory_prompt}
{relation_prompt}
{prompt_info}
{schedule_prompt}
你正在和 {sender_name} 私聊
聊天记录如下
{chat_talking_prompt}
@@ -183,7 +183,9 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query":
)
async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_stream, sender_name) -> str:
async def _build_prompt_focus(
reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply, target_message
) -> str:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(x_person=0, level=2)
@@ -202,38 +204,12 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
chat_talking_prompt = await build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal",
merge_messages=True,
timestamp_mode="relative",
read_mark=0.0,
truncate=True,
)
prompt_ger = ""
if random.random() < 0.04:
prompt_ger += "你喜欢用倒装句"
if random.random() < 0.02:
prompt_ger += "你喜欢用反问句"
reply_styles1 = [
("给出日常且口语化的回复,平淡一些", 0.4),
("给出非常简短的回复", 0.4),
("给出缺失主语的回复,简短", 0.15),
("给出带有语病的回复,朴实平淡", 0.05),
]
reply_style1_chosen = random.choices(
[style[0] for style in reply_styles1], weights=[style[1] for style in reply_styles1], k=1
)[0]
reply_styles2 = [
("不要回复的太有条理,可以有个性", 0.6),
("不要回复的太有条理,可以复读", 0.15),
("回复的认真一些", 0.2),
("可以回复单个表情符号", 0.05),
]
reply_style2_chosen = random.choices(
[style[0] for style in reply_styles2], weights=[style[1] for style in reply_styles2], k=1
)[0]
if structured_info:
structured_info_prompt = await global_prompt_manager.format_prompt(
"info_from_tools", structured_info=structured_info
@@ -241,6 +217,38 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
else:
structured_info_prompt = ""
# 从/data/expression/对应chat_id/expressions.json中读取表达方式
(
learnt_style_expressions,
learnt_grammar_expressions,
personality_expressions,
) = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
style_habbits = []
grammar_habbits = []
# 1. learnt_expressions加权随机选3条
if learnt_style_expressions:
weights = [expr["count"] for expr in learnt_style_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
# 2. learnt_grammar_expressions加权随机选3条
if learnt_grammar_expressions:
weights = [expr["count"] for expr in learnt_grammar_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
grammar_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
# 3. personality_expressions随机选1条
if personality_expressions:
expr = random.choice(personality_expressions)
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
style_habbits_str = "\n".join(style_habbits)
grammar_habbits_str = "\n".join(grammar_habbits)
logger.debug("开始构建 focus prompt")
# --- Choose template based on chat type ---
@@ -248,22 +256,23 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
template_name = "heart_flow_prompt"
# Group specific formatting variables (already fetched or default)
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
prompt = await global_prompt_manager.format_prompt(
template_name,
info_from_tools=structured_info_prompt,
# info_from_tools=structured_info_prompt,
style_habbits=style_habbits_str,
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1, # Used in group template
chat_talking_prompt=chat_talking_prompt,
# chat_talking_prompt=chat_talking_prompt,
chat_info=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME,
prompt_personality=prompt_personality,
chat_target_2=chat_target_2, # Used in group template
current_mind_info=current_mind_info,
reply_style2=reply_style2_chosen,
reply_style1=reply_style1_chosen,
# prompt_personality=prompt_personality,
prompt_personality="",
reason=reason,
prompt_ger=prompt_ger,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
in_mind_reply=in_mind_reply,
target_message=target_message,
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
# sender_name is not used in the group template
)
else: # Private chat
@@ -277,10 +286,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
prompt_personality=prompt_personality,
# chat_target and chat_target_2 are not used in private template
current_mind_info=current_mind_info,
reply_style2=reply_style2_chosen,
reply_style1=reply_style1_chosen,
reason=reason,
prompt_ger=prompt_ger,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
)
# --- End choosing template ---
@@ -303,9 +309,11 @@ class PromptBuilder:
structured_info=None,
message_txt=None,
sender_name="某人",
in_mind_reply=None,
target_message=None,
) -> Optional[str]:
if build_mode == "normal":
return await self._build_prompt_normal(chat_stream, message_txt, sender_name)
return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
elif build_mode == "focus":
return await _build_prompt_focus(
@@ -314,6 +322,8 @@ class PromptBuilder:
structured_info,
chat_stream,
sender_name,
in_mind_reply,
target_message,
)
return None
@@ -425,13 +435,6 @@ class PromptBuilder:
end_time = time.time()
logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}")
if global_config.ENABLE_SCHEDULE_GEN:
schedule_prompt = await global_prompt_manager.format_prompt(
"schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
)
else:
schedule_prompt = ""
logger.debug("开始构建 normal prompt")
# --- Choose template and format based on chat type ---
@@ -447,7 +450,6 @@ class PromptBuilder:
sender_name=effective_sender_name,
memory_prompt=memory_prompt,
prompt_info=prompt_info,
schedule_prompt=schedule_prompt,
chat_target=chat_target_1,
chat_target_2=chat_target_2,
chat_talking_prompt=chat_talking_prompt,
@@ -472,7 +474,6 @@ class PromptBuilder:
sender_name=effective_sender_name,
memory_prompt=memory_prompt,
prompt_info=prompt_info,
schedule_prompt=schedule_prompt,
chat_talking_prompt=chat_talking_prompt,
message_txt=message_txt,
bot_name=global_config.BOT_NICKNAME,
@@ -749,11 +750,11 @@ class PromptBuilder:
self,
is_group_chat: bool, # Now passed as argument
chat_target_info: Optional[dict], # Now passed as argument
cycle_history: Deque["CycleInfo"], # Now passed as argument (Type hint needs import or string)
observed_messages_str: str,
current_mind: Optional[str],
structured_info: Dict[str, Any],
current_available_actions: Dict[str, str],
cycle_info: Optional[str],
# replan_prompt: str, # Replan logic still simplified
) -> str:
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
@@ -786,40 +787,11 @@ class PromptBuilder:
chat_content_block = "当前没有观察到新的聊天内容。\\n"
# Current mind block
current_mind_block = ""
mind_info_prompt = ""
if current_mind:
current_mind_block = f"你的内心想法:\n{current_mind}"
mind_info_prompt = f"对聊天的规划:{current_mind}"
else:
current_mind_block = "你的内心想法:\n[没有特别的想法]"
# Cycle info block (using passed cycle_history)
cycle_info_block = ""
recent_active_cycles = []
for cycle in reversed(cycle_history):
if cycle.action_taken:
recent_active_cycles.append(cycle)
if len(recent_active_cycles) == 3:
break
consecutive_text_replies = 0
responses_for_prompt = []
for cycle in recent_active_cycles:
if cycle.action_type == "text_reply":
consecutive_text_replies += 1
response_text = cycle.response_info.get("response_text", [])
formatted_response = "[空回复]" if not response_text else " ".join(response_text)
responses_for_prompt.append(formatted_response)
else:
break
if consecutive_text_replies >= 3:
cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
elif consecutive_text_replies == 2:
cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'
elif consecutive_text_replies == 1:
cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}"'
if cycle_info_block:
cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n"
else:
cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"
mind_info_prompt = "你刚参与聊天"
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(x_person=2, level=2)
@@ -829,7 +801,6 @@ class PromptBuilder:
for name in action_keys:
desc = current_available_actions[name]
action_options_text += f"- '{name}': {desc}\n"
example_action_key = action_keys[0] if action_keys else "no_reply"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
@@ -839,10 +810,10 @@ class PromptBuilder:
chat_context_description=chat_context_description,
structured_info_block=structured_info_block,
chat_content_block=chat_content_block,
current_mind_block=current_mind_block,
cycle_info_block=cycle_info_block,
mind_info_prompt=mind_info_prompt,
cycle_info_block=cycle_info,
action_options_text=action_options_text,
example_action=example_action_key,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
)
return prompt
@@ -852,5 +823,39 @@ class PromptBuilder:
return "[构建 Planner Prompt 时出错]"
def weighted_sample_no_replacement(items, weights, k) -> list:
    """Draw up to *k* distinct elements from *items*, weighted, without replacement.

    Args:
        items: Candidate elements to draw from.
        weights: Positive weight for each element (same length as items).
        k: Number of elements to draw.

    Returns:
        list: The drawn elements. When fewer than k items are available,
        every available item is returned.

    Each round picks one element from the remaining pool with probability
    proportional to its weight, then removes it from the pool. This
    guarantees that heavier items are more likely to be chosen and that
    no element is selected twice.
    """
    chosen = []
    pool = list(zip(items, weights))
    rounds = min(k, len(pool))
    for _ in range(rounds):
        total_weight = sum(weight for _, weight in pool)
        threshold = random.uniform(0, total_weight)
        cumulative = 0
        for position, (candidate, weight) in enumerate(pool):
            cumulative += weight
            if cumulative >= threshold:
                chosen.append(candidate)
                pool.pop(position)
                break
    return chosen
init_prompt()
prompt_builder = PromptBuilder()

View File

@@ -0,0 +1,68 @@
import time
from typing import Optional
from src.chat.message_receive.message import MessageRecv, BaseMessageInfo
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.message import UserInfo
from src.common.logger_manager import get_logger
import json
logger = get_logger(__name__)
async def create_empty_anchor_message(
    platform: str, group_info: dict, chat_stream: ChatStream
) -> Optional[MessageRecv]:
    """Build a synthetic placeholder message to anchor a reply.

    Used when the observed last message cannot be reconstructed (or the
    observation is empty); the returned MessageRecv carries a system
    trigger marker instead of real user content.
    """
    now = time.time()
    synthetic_id = f"mid_pf_{int(now * 1000)}"
    system_user = UserInfo(
        user_id="system_trigger", user_nickname="System Trigger", platform=platform
    )
    info = BaseMessageInfo(
        message_id=synthetic_id,
        platform=platform,
        group_info=group_info,
        user_info=system_user,
        time=now,
    )
    # Minimal dict payload accepted by MessageRecv; the plain text marker
    # identifies this as a system-triggered placeholder downstream.
    anchor = MessageRecv(
        {
            "message_info": info.to_dict(),
            "processed_plain_text": "[System Trigger Context]",
            "raw_message": "",
            "time": info.time,
        }
    )
    anchor.update_chat_stream(chat_stream)
    return anchor
def parse_thinking_id_to_timestamp(thinking_id: str) -> float:
    """Convert a thinking id of the form ``tid<timestamp>`` back to a float.

    Example: 'tid1718251234.56' -> 1718251234.56

    Raises:
        ValueError: If the id does not start with the ``tid`` prefix
            (or the remainder is not a valid float).
    """
    prefix = "tid"
    if not thinking_id.startswith(prefix):
        raise ValueError("thinking_id 格式不正确")
    return float(thinking_id[len(prefix):])
def get_keywords_from_json(json_str: str) -> list[str]:
    """Extract the ``keywords`` field from a string that embeds JSON.

    The substring between the first '{' and the last '}' is parsed as
    JSON. When no JSON object can be located, or parsing fails, the
    error is logged and an empty list is returned.
    """
    # Locate the JSON object's boundaries inside the raw string.
    left = json_str.find("{")
    right = json_str.rfind("}")
    if left == -1 or right == -1:
        logger.error("未找到有效的JSON内容")
        return []

    try:
        parsed = json.loads(json_str[left : right + 1])
    except json.JSONDecodeError as e:
        logger.error(f"JSON解析失败: {e}")
        return []
    return parsed.get("keywords", [])

View File

@@ -0,0 +1,97 @@
from typing import Dict, Optional
from dataclasses import dataclass
from .info_base import InfoBase
@dataclass
class ChatInfo(InfoBase):
    """Holds metadata about a chat session.

    Stores the chat's id, display name and type in the dict-backed
    storage inherited from InfoBase.

    Attributes:
        type (str): Fixed type tag of this info object, "chat".

    Data fields:
        chat_id (str): Unique identifier of the chat.
        chat_name (str): Human-readable name of the chat.
        chat_type (str): Kind of chat.
    """

    type: str = "chat"

    # --- writers ---

    def set_chat_id(self, chat_id: str) -> None:
        """Record the unique chat identifier."""
        self.data["chat_id"] = chat_id

    def set_chat_name(self, chat_name: str) -> None:
        """Record the chat's display name."""
        self.data["chat_name"] = chat_name

    def set_chat_type(self, chat_type: str) -> None:
        """Record the chat's type."""
        self.data["chat_type"] = chat_type

    # --- readers ---

    def get_chat_id(self) -> Optional[str]:
        """Return the chat id, or None when unset."""
        return self.get_info("chat_id")

    def get_chat_name(self) -> Optional[str]:
        """Return the chat name, or None when unset."""
        return self.get_info("chat_name")

    def get_chat_type(self) -> Optional[str]:
        """Return the chat type, or None when unset."""
        return self.get_info("chat_type")

    def get_type(self) -> str:
        """Return the fixed type tag of this info object."""
        return self.type

    def get_data(self) -> Dict[str, str]:
        """Return the underlying data dict."""
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """Return the value stored under *key*, or None if absent."""
        return self.data.get(key)

View File

@@ -0,0 +1,157 @@
from typing import Dict, Optional, Any
from dataclasses import dataclass
from .info_base import InfoBase
@dataclass
class CycleInfo(InfoBase):
    """Info object describing one heartbeat cycle.

    Records identifiers, timing, the action taken and its payload, the
    trigger reason and the observation text for a single cycle. All
    values live in the dict-backed storage inherited from InfoBase.

    Attributes:
        type (str): Fixed type tag, "cycle".

    Data fields:
        cycle_id (str): Unique id of the cycle.
        start_time (str): When the cycle started.
        end_time (str): When the cycle ended.
        action (str): Action performed during the cycle.
        action_data (str): Stringified details of the action.
        reason (str): Why the cycle was triggered.
        observe_info (str): Observation / reply info for the cycle.
    """

    type: str = "cycle"

    def get_type(self) -> str:
        """Return the fixed type tag."""
        return self.type

    def get_data(self) -> Dict[str, str]:
        """Return the underlying data dict."""
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """Return the value stored under *key*, or None if absent."""
        return self.data.get(key)

    # --- writers ---

    def set_cycle_id(self, cycle_id: str) -> None:
        """Store the cycle's unique identifier."""
        self.data["cycle_id"] = cycle_id

    def set_start_time(self, start_time: str) -> None:
        """Store when the cycle started (standard time format advised)."""
        self.data["start_time"] = start_time

    def set_end_time(self, end_time: str) -> None:
        """Store when the cycle ended (standard time format advised)."""
        self.data["end_time"] = end_time

    def set_action(self, action: str) -> None:
        """Store the name of the action taken during the cycle."""
        self.data["action"] = action

    def set_action_data(self, action_data: Dict[str, Any]) -> None:
        """Store the action payload, stringified for uniform storage."""
        self.data["action_data"] = str(action_data)

    def set_reason(self, reason: str) -> None:
        """Store the reason that triggered the cycle."""
        self.data["reason"] = reason

    def set_observe_info(self, observe_info: str) -> None:
        """Store the observation / reply info of the cycle."""
        self.data["observe_info"] = observe_info

    # --- readers ---

    def get_cycle_id(self) -> Optional[str]:
        """Return the cycle id, or None when unset."""
        return self.get_info("cycle_id")

    def get_start_time(self) -> Optional[str]:
        """Return the start time, or None when unset."""
        return self.get_info("start_time")

    def get_end_time(self) -> Optional[str]:
        """Return the end time, or None when unset."""
        return self.get_info("end_time")

    def get_action(self) -> Optional[str]:
        """Return the action name, or None when unset."""
        return self.get_info("action")

    def get_action_data(self) -> Optional[str]:
        """Return the stringified action payload, or None when unset."""
        return self.get_info("action_data")

    def get_reason(self) -> Optional[str]:
        """Return the trigger reason, or None when unset."""
        return self.get_info("reason")

    def get_observe_info(self) -> Optional[str]:
        """Return the observation info, or None when unset."""
        return self.get_info("observe_info")

View File

@@ -0,0 +1,60 @@
from typing import Dict, Optional, Any, List
from dataclasses import dataclass, field
@dataclass
class InfoBase:
    """Base container for typed info objects.

    Concrete info classes derive from this base and keep their payload
    in the ``data`` dict, which may hold strings, dicts, lists and
    other nested structures.

    Attributes:
        type (str): Type tag of the info object, "base" by default.
        data (Dict[str, Any]): Backing store for the payload.
    """

    type: str = "base"
    data: Dict[str, Any] = field(default_factory=dict)

    def get_type(self) -> str:
        """Return the type tag of this info object."""
        return self.type

    def get_data(self) -> Dict[str, Any]:
        """Return the full payload dict."""
        return self.data

    def get_info(self, key: str) -> Optional[Any]:
        """Return the value stored under *key*, or None if absent."""
        return self.data.get(key)

    def get_info_list(self, key: str) -> List[Any]:
        """Return the list stored under *key*.

        Yields an empty list when the key is missing or the stored
        value is not a list.
        """
        value = self.data.get(key)
        return value if isinstance(value, list) else []

View File

@@ -0,0 +1,34 @@
from typing import Dict, Any
from dataclasses import dataclass, field
from .info_base import InfoBase
@dataclass
class MindInfo(InfoBase):
    """Info object carrying the bot's current train of thought.

    Attributes:
        type (str): Fixed type tag, "mind".
        data (Dict[str, Any]): Payload dict holding "current_mind".
    """

    type: str = "mind"
    data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""})

    def get_current_mind(self) -> str:
        """Return the current thought, or "" when nothing is set."""
        return self.get_info("current_mind") or ""

    def set_current_mind(self, mind: str) -> None:
        """Store *mind* as the current thought."""
        self.data["current_mind"] = mind

View File

@@ -0,0 +1,115 @@
from typing import Dict, Optional
from dataclasses import dataclass
from .info_base import InfoBase
@dataclass
class ObsInfo(InfoBase):
    """Chat-observation (OBS) info record.

    Stores the chat message text, a truncated version of it, a summary of
    earlier chat, the chat type and the chat target. Inherits from InfoBase
    and keeps everything in the ``data`` dict.

    Attributes:
        type: Fixed identifier "obs".

    Data fields:
        talking_message: chat message text.
        talking_message_str_truncate: truncated chat message text.
        chat_type: "private", "group" or "other".
    """

    type: str = "obs"

    def set_talking_message(self, message: str) -> None:
        """Store the chat message text."""
        self.data["talking_message"] = message

    def set_talking_message_str_truncate(self, message: str) -> None:
        """Store the truncated chat message text."""
        self.data["talking_message_str_truncate"] = message

    def set_previous_chat_info(self, message: str) -> None:
        """Store the summary of earlier chat."""
        self.data["previous_chat_info"] = message

    def set_chat_type(self, chat_type: str) -> None:
        """Store the chat type; anything other than "private"/"group"/"other" becomes "other"."""
        normalized = chat_type if chat_type in ("private", "group", "other") else "other"
        self.data["chat_type"] = normalized

    def set_chat_target(self, chat_target: str) -> None:
        """Store the chat target (the peer's display name)."""
        self.data["chat_target"] = chat_target

    def get_talking_message(self) -> Optional[str]:
        """Return the chat message text, or None when unset."""
        return self.get_info("talking_message")

    def get_talking_message_str_truncate(self) -> Optional[str]:
        """Return the truncated chat message text, or None when unset."""
        return self.get_info("talking_message_str_truncate")

    def get_chat_type(self) -> str:
        """Return the chat type, defaulting to "other" when unset."""
        chat_type = self.get_info("chat_type")
        return chat_type if chat_type else "other"

    def get_type(self) -> str:
        """Return the record's type identifier."""
        return self.type

    def get_data(self) -> Dict[str, str]:
        """Return the full payload dict."""
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """Return the value stored under *key*, or None when the key is absent."""
        return self.data.get(key)

View File

@@ -0,0 +1,69 @@
from typing import Dict, Optional, Any, List
from dataclasses import dataclass, field
@dataclass
class StructuredInfo:
    """Container for structured information items (e.g. tool results).

    Keeps its payload in the ``data`` dict, which may hold strings, dicts,
    lists and other nested structures.

    Attributes:
        type: Type identifier, fixed to "structured_info".
        data: Dict holding the record's payload.
    """

    type: str = "structured_info"
    data: Dict[str, Any] = field(default_factory=dict)

    def get_type(self) -> str:
        """Return the record's type identifier."""
        return self.type

    def get_data(self) -> Dict[str, Any]:
        """Return the full payload dict."""
        return self.data

    def get_info(self, key: str) -> Optional[Any]:
        """Return the value stored under *key*, or None when the key is absent."""
        return self.data.get(key)

    def get_info_list(self, key: str) -> List[Any]:
        """Return the list stored under *key*; [] when absent or not a list."""
        value = self.data.get(key)
        return value if isinstance(value, list) else []

    def set_info(self, key: str, value: Any) -> None:
        """Store *value* under *key* in the payload dict."""
        self.data[key] = value

View File

@@ -0,0 +1,52 @@
from abc import ABC, abstractmethod
from typing import List, Any, Optional, Dict
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.heart_flow.observation.observation import Observation
from src.common.logger_manager import get_logger
logger = get_logger("base_processor")
class BaseProcessor(ABC):
    """Abstract base class for info processors.

    Concrete processors subclass this and implement ``process_info``; inputs
    may be InfoBase records and/or Observation objects.
    """

    @abstractmethod
    def __init__(self):
        """Initialize the processor."""
        pass

    @abstractmethod
    async def process_info(
        self,
        infos: List[InfoBase],
        observations: Optional[List[Observation]] = None,
        running_memorys: Optional[List[Dict]] = None,
        **kwargs: Any,
    ) -> List[InfoBase]:
        """Process the given info objects.

        Args:
            infos: InfoBase objects to process.
            observations: Optional Observation objects.
            running_memorys: Optional list of recalled-memory dicts.
            **kwargs: Extra options.

        Returns:
            List[InfoBase]: The processed InfoBase records.
        """
        pass

    def _create_processed_item(self, info_type: str, info_data: Any) -> dict:
        """Build a processed-info dict with a derived id and a default TTL of 3."""
        item = {
            "type": info_type,
            "id": "info_" + info_type,
            "content": info_data,
            "ttl": 3,
        }
        return item

View File

@@ -0,0 +1,123 @@
from typing import List, Optional, Any
from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.info.info_base import InfoBase
from .base_processor import BaseProcessor
from src.common.logger_manager import get_logger
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.focus_chat.info.cycle_info import CycleInfo
from datetime import datetime
from typing import Dict
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
logger = get_logger("observation")
class ChattingInfoProcessor(BaseProcessor):
    """Observation processor converting Observation objects into info records.

    ChattingObservation -> ObsInfo (chat text, truncated text, chat type/target)
    HFCloopObservation  -> CycleInfo (loop observation summary)
    """

    def __init__(self):
        """Create the processor and the LLM used to summarize compressed chat."""
        self.llm_summary = LLMRequest(
            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
        )
        super().__init__()

    async def process_info(
        self,
        observations: Optional[List[Observation]] = None,
        running_memorys: Optional[List[Dict]] = None,
        **kwargs: Any,
    ) -> List[InfoBase]:
        """Convert the given observations into ObsInfo / CycleInfo records.

        Args:
            observations: Observation objects to convert.
            running_memorys: Unused here; kept for interface parity.
            **kwargs: Extra options (unused).

        Returns:
            List[InfoBase]: One ObsInfo per ChattingObservation and one
            CycleInfo per HFCloopObservation; [] when there is nothing.
        """
        processed_infos = []
        if observations:
            for obs in observations:
                if isinstance(obs, ChattingObservation):
                    obs_info = ObsInfo()
                    # Fold any pending old messages into mid-term memory first,
                    # so mid_memory_info below is up to date.
                    await self.chat_compress(obs)
                    if hasattr(obs, "talking_message_str"):
                        obs_info.set_talking_message(obs.talking_message_str)
                    if hasattr(obs, "talking_message_str_truncate"):
                        obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate)
                    if hasattr(obs, "mid_memory_info"):
                        obs_info.set_previous_chat_info(obs.mid_memory_info)
                    chat_type = "group" if obs.is_group_chat else "private"
                    # Guard against a missing target-info dict (group chats may not set it).
                    obs_info.set_chat_target((obs.chat_target_info or {}).get("person_name", "某人"))
                    obs_info.set_chat_type(chat_type)
                    processed_infos.append(obs_info)
                if isinstance(obs, HFCloopObservation):
                    obs_info = CycleInfo()
                    obs_info.set_observe_info(obs.observe_info)
                    processed_infos.append(obs_info)
        return processed_infos

    async def chat_compress(self, obs: ChattingObservation):
        """Compress the observation's pending old messages into a mid-term memory.

        Asks the summary LLM to label the compressed segment with a theme,
        appends the segment to obs.mid_memorys (bounded by
        obs.max_mid_memory_len), rebuilds obs.mid_memory_info, and finally
        clears the compression state on the observation. No-op when there is
        nothing to compress.
        """
        if not obs.compressor_prompt:
            return
        # Default theme up front: previously `summary` was assigned only inside
        # the try block, so an LLM failure raised NameError below.
        summary = "没有主题的闲聊"
        try:
            summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt)
            if summary_result:  # keep the default on an empty result
                summary = summary_result
        except Exception as e:
            logger.error(f"总结主题失败 for chat {obs.chat_id}: {e}")
        mid_memory = {
            "id": str(int(datetime.now().timestamp())),
            "theme": summary,
            "messages": obs.oldest_messages,  # raw message objects
            "readable_messages": obs.oldest_messages_str,
            "chat_id": obs.chat_id,
            "created_at": datetime.now().timestamp(),
        }
        obs.mid_memorys.append(mid_memory)
        if len(obs.mid_memorys) > obs.max_mid_memory_len:
            obs.mid_memorys.pop(0)  # drop the oldest entry
        mid_memory_str = "之前聊天的内容概述是:\n"
        for mid_memory_item in obs.mid_memorys:
            time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
            mid_memory_str += (
                f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}){mid_memory_item['theme']}\n"
            )
        obs.mid_memory_info = mid_memory_str
        # Reset the compression state now that it has been consumed.
        obs.compressor_prompt = ""
        obs.oldest_messages = []
        obs.oldest_messages_str = ""

View File

@@ -0,0 +1,410 @@
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.observation.observation import Observation
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
import time
import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
import random
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.json_utils import safe_json_dumps
from src.chat.message_receive.chat_stream import chat_manager
import difflib
from src.chat.person_info.relationship_manager import relationship_manager
from .base_processor import BaseProcessor
from src.chat.focus_chat.info.mind_info import MindInfo
from typing import List, Optional
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.focus_chat.info_processors.processor_utils import (
calculate_similarity,
calculate_replacement_probability,
get_spark,
)
from typing import Dict
from src.chat.focus_chat.info.info_base import InfoBase
logger = get_logger("sub_heartflow")
def init_prompt():
    """Register the group- and private-chat thinking prompt templates.

    Side effect: constructing Prompt(...) registers each template with the
    global prompt manager under the given name.
    """
    # --- Group Chat Prompt ---
    # NOTE(review): the numbered requirements jump from 4 to 6 — item 5 looks
    # dropped. The text is sent verbatim to the LLM, so confirm before renumbering.
    group_prompt = """
{memory_str}
{extra_info}
{relation_prompt}
你的名字是{bot_name}
{mood_info}
{cycle_info_block}
现在是{time_now}你正在上网和qq群里的网友们聊天以下是正在进行的聊天内容
{chat_observe_info}
以下是你之前对聊天的观察和规划,你的名字是{bot_name}
{last_mind}
现在请你继续输出观察和规划,输出要求:
1. 先关注未读新消息的内容和近期回复历史
2. 根据新信息,修改和删除之前的观察和规划
3. 根据聊天内容继续输出观察和规划,{hf_do_next}
4. 注意群聊的时间线索,话题由谁发起,进展状况如何,思考聊天的时间线。
6. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好"""
    Prompt(group_prompt, "sub_heartflow_prompt_before")
    # --- Private Chat Prompt ---
    private_prompt = """
{memory_str}
{extra_info}
{relation_prompt}
你的名字是{bot_name},{prompt_personality},你现在{mood_info}
{cycle_info_block}
现在是{time_now},你正在上网,和 {chat_target_name} 私聊,以下是你们的聊天内容:
{chat_observe_info}
以下是你之前对聊天的观察和规划:
{last_mind}
请仔细阅读聊天内容,想想你和 {chat_target_name} 的关系,回顾你们刚刚的交流,你刚刚发言和对方的反应,思考聊天的主题。
请思考你要不要回复以及如何回复对方。
思考并输出你的内心想法
输出要求:
1. 根据聊天内容生成你的想法,{hf_do_next}
2. 不要分点、不要使用表情符号
3. 避免多余符号(冒号、引号、括号等)
4. 语言简洁自然,不要浮夸
5. 如果你刚发言,对方没有回复你,请谨慎回复"""
    Prompt(private_prompt, "sub_heartflow_prompt_private_before")
class MindProcessor(BaseProcessor):
    """Processor that generates the bot's inner monologue ("mind") for a chat.

    Builds a thinking prompt from observations, recalled memories and
    relationship info, asks the LLM for a new inner thought, and returns it
    wrapped in a MindInfo record.
    """

    def __init__(self, subheartflow_id: str):
        """Create the processor for one sub-heartflow (one chat stream)."""
        super().__init__()
        self.subheartflow_id = subheartflow_id
        self.llm_model = LLMRequest(
            model=global_config.llm_sub_heartflow,
            temperature=global_config.llm_sub_heartflow["temp"],
            max_tokens=800,
            request_type="sub_heart_flow",
        )
        # Current inner thought and the history of previous thoughts.
        self.current_mind = ""
        self.past_mind = []
        # TTL-bound structured info items and their rendered text form.
        self.structured_info = []
        self.structured_info_str = ""
        name = chat_manager.get_stream_name(self.subheartflow_id)
        self.log_prefix = f"[{name}] "
        self._update_structured_info_str()

    def _update_structured_info_str(self):
        """Rebuild ``structured_info_str`` from the current ``structured_info`` items."""
        if not self.structured_info:
            self.structured_info_str = ""
            return
        lines = ["【信息】"]
        for item in self.structured_info:
            # Simplified per-type rendering; TTL is kept on the item only for debugging.
            type_str = item.get("type", "未知类型")
            content_str = item.get("content", "")
            if type_str == "info":
                lines.append(f"刚刚: {content_str}")
            elif type_str == "memory":
                lines.append(f"{content_str}")
            elif type_str == "comparison_result":
                lines.append(f"数字大小比较结果: {content_str}")
            elif type_str == "time_info":
                lines.append(f"{content_str}")
            elif type_str == "lpmm_knowledge":
                lines.append(f"你知道:{content_str}")
            else:
                lines.append(f"{type_str}的信息: {content_str}")
        self.structured_info_str = "\n".join(lines)
        logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")

    async def process_info(
        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
    ) -> List[InfoBase]:
        """Produce a MindInfo record from the given observations.

        Args:
            observations: Observation objects describing the chat state.
            running_memorys: Memories recalled for the current chat.
            *infos: Unused extra InfoBase objects (kept for interface parity).

        Returns:
            List[InfoBase]: A single-element list with the new MindInfo.
        """
        current_mind = await self.do_thinking_before_reply(observations, running_memorys)
        mind_info = MindInfo()
        mind_info.set_current_mind(current_mind)
        return [mind_info]

    async def do_thinking_before_reply(
        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None
    ):
        """Think before replying: build the prompt, query the LLM, update state.

        Args:
            observations: Observation objects for this chat.
            running_memorys: Memories recalled for the current chat.

        Returns:
            str: The new inner thought (a fallback string on LLM failure).

        NOTE(review): several prompt variables below (is_group_chat,
        chat_observe_info, person_list, hfcloop_observe_info,
        chat_target_name) are assigned only when ``observations`` contains
        the matching observation types; with other inputs this raises
        NameError — confirm callers always pass both a ChattingObservation
        and an HFCloopObservation.
        """
        # ---------- 0. Update and prune structured_info (TTL bookkeeping) ----------
        if self.structured_info:
            updated_info = []
            for item in self.structured_info:
                item["ttl"] -= 1
                if item["ttl"] > 0:
                    updated_info.append(item)
                else:
                    logger.debug(f"{self.log_prefix} 移除过期的 structured_info 项: {item['id']}")
            self.structured_info = updated_info
            self._update_structured_info_str()
        logger.debug(
            f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}"
        )
        memory_str = ""
        if running_memorys:
            memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
            for running_memory in running_memorys:
                memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
        # ---------- 1. Prepare base data ----------
        # Previous thought (empty string when there is none yet).
        previous_mind = self.current_mind if self.current_mind else ""
        if observations is None:
            observations = []
        for observation in observations:
            if isinstance(observation, ChattingObservation):
                # Chat metadata.
                is_group_chat = observation.is_group_chat
                chat_target_info = observation.chat_target_info
                chat_target_name = "对方"  # default display name for private chats
                if not is_group_chat and chat_target_info:
                    # Prefer person_name, then user_nickname, then the default.
                    chat_target_name = (
                        chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name
                    )
                # Chat content.
                chat_observe_info = observation.get_observe_info()
                person_list = observation.person_list
            if isinstance(observation, HFCloopObservation):
                hfcloop_observe_info = observation.get_observe_info()
        # ---------- 3. Prepare personalization data ----------
        individuality = Individuality.get_instance()
        relation_prompt = ""
        for person in person_list:
            relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
        # Personality section (currently disabled).
        # prompt_personality = individuality.get_prompt(x_person=2, level=2)
        # Current wall-clock time.
        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        spark_prompt = get_spark()
        # ---------- 5. Build the final prompt ----------
        template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before"
        logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板")
        prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
            memory_str=memory_str,
            extra_info=self.structured_info_str,
            # prompt_personality=prompt_personality,
            relation_prompt=relation_prompt,
            bot_name=individuality.name,
            time_now=time_now,
            chat_observe_info=chat_observe_info,
            mood_info="mood_info",
            hf_do_next=spark_prompt,
            last_mind=previous_mind,
            cycle_info_block=hfcloop_observe_info,
            chat_target_name=chat_target_name,
        )
        # Final prompt string after template formatting.
        final_prompt = prompt
        content = ""  # initialize the result variable
        try:
            # Ask the LLM for the new inner thought.
            response, _ = await self.llm_model.generate_response_async(prompt=final_prompt)
            # Use the raw LLM text as the content.
            content = response if response else ""
        except Exception as e:
            # Catch-all: log and fall back to an error placeholder.
            logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}")
            logger.error(traceback.format_exc())
            content = "思考过程中出现错误"
        # Log the prompt used for this round of thinking.
        logger.debug(f"{self.log_prefix} 思考prompt: \n{final_prompt}\n")
        # Handle an empty response.
        if not content:
            content = "(不知道该想些什么...)"
            logger.warning(f"{self.log_prefix} LLM返回空结果思考失败。")
        # ---------- 8. Update thinking state and return ----------
        logger.info(f"{self.log_prefix} 思考结果: {content}")
        # Persist the new thought.
        self.update_current_mind(content)
        return content

    def update_current_mind(self, response):
        """Archive the current thought onto past_mind and store *response* as current."""
        if self.current_mind:  # only archive non-empty thoughts
            self.past_mind.append(self.current_mind)
        self.current_mind = response

    def de_similar(self, previous_mind, new_content):
        """Probabilistically rewrite *new_content* when it is too similar to *previous_mind*.

        The more similar the two thoughts, the more likely the new one is
        truncated/deduplicated and prefixed with filler/transition words.
        Returns the (possibly rewritten) content; on any error the original
        *new_content* is returned unchanged.

        NOTE(review): this method is not called anywhere in this class —
        presumably invoked externally; confirm before removing.
        """
        try:
            similarity = calculate_similarity(previous_mind, new_content)
            replacement_prob = calculate_replacement_probability(similarity)
            logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}")
            # Word lists, defined before the branch so both paths can use them.
            # NOTE(review): the filler-word literals appear empty here — possibly
            # mangled by an encoding step; confirm the intended contents.
            yu_qi_ci_liebiao = ["", "", "", "", "", ""]
            zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"]
            cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"]
            zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao
            if random.random() < replacement_prob:
                # Exact duplicate: special handling.
                if similarity == 1.0:
                    logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...")
                    # Keep roughly half the content, with a random split point.
                    if len(new_content) > 1:  # avoid slicing content that is too short
                        split_point = max(
                            1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4)
                        )
                        truncated_content = new_content[:split_point]
                    else:
                        truncated_content = new_content  # single char or empty: keep as is
                    # Prefix with a filler word and a transition/continuation word.
                    yu_qi_ci = random.choice(yu_qi_ci_liebiao)
                    zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao)
                    content = f"{yu_qi_ci}{zhuan_jie_ci}{truncated_content}"
                    logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}")
                else:
                    # High (but not total) similarity: standard deduplication.
                    logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...")
                    logger.debug(
                        f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}"
                    )
                    matcher = difflib.SequenceMatcher(None, previous_mind, new_content)
                    logger.debug(f"{self.log_prefix} matcher类型: {type(matcher)}")
                    deduplicated_parts = []
                    last_match_end_in_b = 0
                    # Collect and log all matching blocks.
                    matching_blocks = matcher.get_matching_blocks()
                    logger.debug(f"{self.log_prefix} 匹配块数量: {len(matching_blocks)}")
                    logger.debug(
                        f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}"
                    )
                    # get_matching_blocks() yields (i, j, n) tuples: i indexes a, j indexes b, n is the match length.
                    for idx, match in enumerate(matching_blocks):
                        if not isinstance(match, tuple):
                            logger.error(f"{self.log_prefix} 匹配块 {idx} 不是元组类型,而是 {type(match)}: {match}")
                            continue
                        try:
                            _i, j, n = match  # unpack the tuple
                            logger.debug(f"{self.log_prefix} 匹配块 {idx}: i={_i}, j={j}, n={n}")
                            if last_match_end_in_b < j:
                                # Keep only the non-matching (new) spans of new_content.
                                try:
                                    non_matching_part = new_content[last_match_end_in_b:j]
                                    logger.debug(
                                        f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}"
                                    )
                                    if not isinstance(non_matching_part, str):
                                        logger.warning(
                                            f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}"
                                        )
                                        non_matching_part = str(non_matching_part)
                                    deduplicated_parts.append(non_matching_part)
                                except Exception as e:
                                    logger.error(f"{self.log_prefix} 处理非匹配部分时出错: {e}")
                                    logger.error(traceback.format_exc())
                            last_match_end_in_b = j + n
                        except Exception as e:
                            logger.error(f"{self.log_prefix} 处理匹配块时出错: {e}")
                            logger.error(traceback.format_exc())
                    logger.debug(f"{self.log_prefix} 去重前部分列表: {deduplicated_parts}")
                    logger.debug(f"{self.log_prefix} 列表元素类型: {[type(part) for part in deduplicated_parts]}")
                    # Make sure every element is a string.
                    deduplicated_parts = [str(part) for part in deduplicated_parts]
                    # Guard against an empty list.
                    if not deduplicated_parts:
                        logger.warning(f"{self.log_prefix} 去重后列表为空,添加空字符串")
                        deduplicated_parts = [""]
                    logger.debug(f"{self.log_prefix} 处理后的部分列表: {deduplicated_parts}")
                    try:
                        deduplicated_content = "".join(deduplicated_parts).strip()
                        logger.debug(f"{self.log_prefix} 拼接后的去重内容: '{deduplicated_content}'")
                    except Exception as e:
                        logger.error(f"{self.log_prefix} 拼接去重内容时出错: {e}")
                        logger.error(traceback.format_exc())
                        deduplicated_content = ""
                    if deduplicated_content:
                        # Probabilistically prepend filler / transition words.
                        prefix_str = ""
                        if random.random() < 0.3:  # 30% chance: filler word
                            prefix_str += random.choice(yu_qi_ci_liebiao)
                        if random.random() < 0.7:  # 70% chance: transition/continuation word
                            prefix_str += random.choice(zhuan_jie_ci_liebiao)
                        # Assemble the final result.
                        if prefix_str:
                            content = f"{prefix_str}{deduplicated_content}"  # update content
                            logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}")
                        else:
                            content = deduplicated_content  # update content
                            logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}")
                    else:
                        logger.warning(f"{self.log_prefix} 去重后内容为空保留原始LLM输出: {new_content}")
                        content = new_content  # keep the original content
            else:
                logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})")
                # content stays new_content
        except Exception as e:
            logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}")
            logger.error(traceback.format_exc())
            # On error, keep the original content.
            content = new_content
        return content
init_prompt()

View File

@@ -0,0 +1,56 @@
import difflib
import random
import time
def calculate_similarity(text_a: str, text_b: str) -> float:
    """Return the difflib similarity ratio of two strings (0.0 when either is empty)."""
    if text_a and text_b:
        return difflib.SequenceMatcher(None, text_a, text_b).ratio()
    return 0.0
def calculate_replacement_probability(similarity: float) -> float:
    """Map a similarity score to a replacement probability.

    Rules:
    - similarity <= 0.4  -> 0.0
    - similarity >= 0.9  -> 1.0
    - 0.4 < s <= 0.6     -> linear from (0.4, 0.0) to (0.6, 0.7)
    - 0.6 < s < 0.9      -> linear from (0.6, 0.7) to (0.9, 1.0)
    """
    if similarity <= 0.4:
        return 0.0
    if similarity >= 0.9:
        return 1.0
    if similarity <= 0.6:
        # p = 3.5 * s - 1.4
        return max(0.0, 3.5 * similarity - 1.4)
    # p = s + 0.1
    return min(1.0, max(0.0, similarity + 0.1))
def get_spark():
    """Pick a thinking-guidance sentence by weighted random choice.

    The generator is seeded with the current minute, so the choice is stable
    within the same minute and varies across minutes.
    """
    rng = random.Random(int(time.strftime("%M")))
    hf_options = [
        ("可以参考之前的想法,在原来想法的基础上继续思考", 0.2),
        ("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4),
        ("不要太深入", 0.2),
        ("进行深入思考", 0.2),
    ]
    texts = [option[0] for option in hf_options]
    weights = [option[1] for option in hf_options]
    return rng.choices(texts, weights=weights, k=1)[0]

View File

@@ -0,0 +1,193 @@
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
import time
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from src.chat.person_info.relationship_manager import relationship_manager
from .base_processor import BaseProcessor
from typing import List, Optional, Dict
from src.chat.heart_flow.observation.observation import Observation
from src.chat.heart_flow.observation.working_observation import WorkingObservation
from src.chat.focus_chat.info.structured_info import StructuredInfo
logger = get_logger("tool_use")
def init_prompt():
    """Register the tool-executor prompt template with the global prompt manager."""
    # Tool-executor prompt: asks the LLM to decide whether any tool should be
    # called for the current chat content.
    tool_executor_prompt = """
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}
你要在群聊中扮演以下角色:
{prompt_personality}
你当前的额外信息:
{memory_str}
群里正在进行的聊天内容:
{chat_observe_info}
请仔细分析聊天内容,考虑以下几点:
1. 内容中是否包含需要查询信息的问题
2. 是否需要执行特定操作
3. 是否有明确的工具使用指令
4. 考虑用户与你的关系以及当前的对话氛围
如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"
"""
    Prompt(tool_executor_prompt, "tool_executor_prompt")
class ToolProcessor(BaseProcessor):
    """Processor that lets an LLM decide on and execute tool calls.

    Tool results are pushed into any WorkingObservation present, and the
    accumulated working info is returned as a single StructuredInfo record.
    """

    def __init__(self, subheartflow_id: str):
        """Create the processor for one sub-heartflow (one chat stream)."""
        super().__init__()
        self.subheartflow_id = subheartflow_id
        self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
        self.llm_model = LLMRequest(
            model=global_config.llm_tool_use,
            max_tokens=500,
            request_type="tool_execution",
        )
        self.structured_info = []

    async def process_info(
        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
    ) -> List[dict]:
        """Run tools for chat observations and fold results into structured info.

        Args:
            observations: Observation objects; ChattingObservation triggers
                tool execution, WorkingObservation accumulates the results.
            running_memorys: Memories recalled for the current chat.
            *infos: Unused extra InfoBase objects (kept for interface parity).

        Returns:
            list: A single-element list with a StructuredInfo record, or []
            when there are no observations. (Previously this method could
            return None or raise NameError when the expected observation
            types were absent, and the inner loop shadowed the outer
            `observation` variable.)
        """
        if not observations:
            return []
        # Run tools for every chat observation, collecting their results.
        tool_results = []
        for obs in observations:
            if isinstance(obs, ChattingObservation):
                result, _used_tools, _prompt = await self.execute_tools(obs, running_memorys)
                tool_results.extend(result)
        # Feed results into working observations and build the returned record.
        structured_info = StructuredInfo()
        for obs in observations:
            if isinstance(obs, WorkingObservation):
                for item in tool_results:
                    logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {item}")
                    obs.add_structured_info(item)
                working_infos = obs.get_observe_info()
                logger.debug(f"{self.log_prefix} 获取更新后WorkingObservation中的结构化信息: {working_infos}")
                for working_info in working_infos:
                    structured_info.set_info(working_info.get("type"), working_info.get("content"))
        return [structured_info]

    async def execute_tools(self, observation: ChattingObservation, running_memorys: Optional[List[Dict]] = None):
        """Build the tool prompt, query the LLM, and execute any tool calls.

        Args:
            observation: The chat observation supplying the conversation text.
            running_memorys: Memories recalled for the current chat.

        Returns:
            Tuple[List[Dict], List[str], str]: (structured tool results,
            names of the tools used, the prompt sent to the LLM).
        """
        tool_instance = ToolUser()
        tools = tool_instance._define_tools()
        is_group_chat = observation.is_group_chat
        chat_observe_info = observation.get_observe_info()
        person_list = observation.person_list
        memory_str = ""
        if running_memorys:
            memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
            for running_memory in running_memorys:
                memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
        # Relationship info is built for parity with the mind processor; the
        # current template does not interpolate it.
        relation_prompt = "【关系信息】\n"
        for person in person_list:
            relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
        # Personality info.
        individuality = Individuality.get_instance()
        prompt_personality = individuality.get_prompt(x_person=2, level=2)
        # Current wall-clock time.
        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Prompt dedicated to tool invocation.
        prompt = await global_prompt_manager.format_prompt(
            "tool_executor_prompt",
            memory_str=memory_str,
            chat_observe_info=chat_observe_info,
            is_group_chat=is_group_chat,
            prompt_personality=prompt_personality,
            bot_name=individuality.name,
            time_now=time_now,
        )
        # Ask the LLM, focused purely on tool use.
        logger.debug(f"开始执行工具调用{prompt}")
        response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
        logger.debug(f"获取到工具原始输出:\n{tool_calls}")
        # Validate and execute the tool calls, collecting structured results.
        new_structured_items = []
        used_tools = []  # names of the tools that were invoked
        if tool_calls:
            success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
            if success and valid_tool_calls:
                for tool_call in valid_tool_calls:
                    try:
                        tool_name = tool_call.get("name", "unknown_tool")
                        used_tools.append(tool_name)
                        result = await tool_instance._execute_tool_call(tool_call)
                        # Guard before reading the result (previously .get() was
                        # called before the truthiness check, so a None result
                        # raised AttributeError).
                        if result:
                            name = result.get("type", "unknown_type")
                            content = result.get("content", "")
                            logger.info(f"工具{name},获得信息:{content}")
                            new_item = {
                                "type": name,
                                "id": result.get("id", f"tool_exec_{time.time()}"),
                                "content": content,
                                "ttl": 3,
                            }
                            new_structured_items.append(new_item)
                    except Exception as e:
                        logger.error(f"{self.log_prefix}工具执行失败: {e}")
        return new_structured_items, used_tools, prompt
init_prompt()

View File

@@ -0,0 +1,105 @@
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.observation.working_observation import WorkingObservation
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.common.logger_manager import get_logger
from src.chat.utils.prompt_builder import Prompt
from datetime import datetime
from src.chat.memory_system.Hippocampus import HippocampusManager
from typing import List, Dict
logger = get_logger("memory_activator")
def init_prompt():
    """Register the memory-activator prompt template with the global prompt manager."""
    # NOTE(review): "进行会议" in the template below looks like a typo for
    # "进行回忆" (recall); the string is sent verbatim to the LLM, so confirm
    # before editing it.
    memory_activator_prompt = """
    你是一个记忆分析器,你需要根据以下信息来进行会议
    以下是一场聊天中的信息,请根据这些信息,总结出几个关键词作为记忆回忆的触发词
    {obs_info_text}
    请输出一个json格式包含以下字段
    {{
        "keywords": ["关键词1", "关键词2", "关键词3",......]
    }}
    不要输出其他多余内容只输出json格式就好
    """
    Prompt(memory_activator_prompt, "memory_activator_prompt")
class MemoryActivator:
    """Activates long-term memories relevant to the current observations.

    Keeps a small rolling window (``self.running_memory``) of recalled
    memories; each entry expires after being carried through two further
    activations (duration reaches 3).
    """

    def __init__(self):
        # LLM kept for the (currently disabled) keyword-extraction path below.
        self.summary_model = LLMRequest(
            model=global_config.llm_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
        )
        self.running_memory = []

    async def activate_memory(self, observations) -> List[Dict]:
        """Recall memories related to the given observations.

        Args:
            observations: Observation objects already refreshed for this cycle.

        Returns:
            List[Dict]: The current rolling list of activated memories
            (dicts with topic/content/timestamp/duration).
        """
        # Flatten the observations into one text blob for retrieval.
        obs_info_text = ""
        for observation in observations:
            if isinstance(observation, ChattingObservation):
                obs_info_text += observation.get_observe_info()
            elif isinstance(observation, WorkingObservation):
                working_info = observation.get_observe_info()
                for working_info_item in working_info:
                    obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n"
            elif isinstance(observation, HFCloopObservation):
                obs_info_text += observation.get_observe_info()
        # Disabled alternative: extract keywords via the LLM, then query by topic.
        # prompt = await global_prompt_manager.format_prompt(
        #     "memory_activator_prompt",
        #     obs_info_text=obs_info_text,
        # )
        # logger.debug(f"prompt: {prompt}")
        # response = await self.summary_model.generate_response(prompt)
        # logger.debug(f"response: {response}")
        # response_str = response[0]
        # keywords = list(get_keywords_from_json(response_str))
        # related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
        #     valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
        # )
        related_memory = await HippocampusManager.get_instance().get_memory_from_text(
            text=obs_info_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=True
        )
        logger.debug(f"获取到的记忆: {related_memory}")
        # Age existing memories: bump duration by 1, drop entries reaching 3.
        for m in self.running_memory[:]:
            m["duration"] = m.get("duration", 1) + 1
        self.running_memory = [m for m in self.running_memory if m["duration"] < 3]
        if related_memory:
            for topic, memory in related_memory:
                # Skip memories already present with the same topic and content.
                exists = any(m["topic"] == topic and m["content"] == memory for m in self.running_memory)
                if not exists:
                    self.running_memory.append(
                        {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat(), "duration": 1}
                    )
                    logger.debug(f"添加新记忆: {topic} - {memory}")
        return self.running_memory
init_prompt()

View File

@@ -5,9 +5,9 @@ from typing import Optional, Coroutine, Callable, Any, List
from src.common.logger_manager import get_logger
# Need manager types for dependency injection
from src.heart_flow.mai_state_manager import MaiStateManager, MaiStateInfo
from src.heart_flow.subheartflow_manager import SubHeartflowManager
from src.heart_flow.interest_logger import InterestLogger
from src.chat.heart_flow.mai_state_manager import MaiStateManager, MaiStateInfo
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
from src.chat.heart_flow.interest_logger import InterestLogger
logger = get_logger("background_tasks")

View File

@@ -10,7 +10,7 @@ class ChatState(enum.Enum):
class ChatStateInfo:
def __init__(self):
self.chat_status: ChatState = ChatState.ABSENT
self.chat_status: ChatState = ChatState.CHAT
self.current_state_time = 120
self.mood_manager = mood_manager

View File

@@ -1,16 +1,14 @@
from src.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.plugins.models.utils_model import LLMRequest
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.plugins.schedule.schedule_generator import bot_schedule
from src.common.logger_manager import get_logger
from typing import Any, Optional
from src.do_tool.tool_use import ToolUser
from src.plugins.person_info.relationship_manager import relationship_manager # Module instance
from src.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager
from src.heart_flow.subheartflow_manager import SubHeartflowManager
from src.heart_flow.mind import Mind
from src.heart_flow.interest_logger import InterestLogger # Import InterestLogger
from src.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
from src.tools.tool_use import ToolUser
from src.chat.person_info.relationship_manager import relationship_manager # Module instance
from src.chat.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
from src.chat.heart_flow.interest_logger import InterestLogger # Import InterestLogger
from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
logger = get_logger("heartflow")
@@ -45,8 +43,6 @@ class Heartflow:
self.tool_user_instance = ToolUser() # 工具使用模块
self.relationship_manager_instance = relationship_manager # 关系管理模块
# 子系统初始化
self.mind: Mind = Mind(self.subheartflow_manager, self.llm_model) # 思考管理器
self.interest_logger: InterestLogger = InterestLogger(self.subheartflow_manager, self) # 兴趣日志记录器
# 后台任务管理器 (整合所有定时任务)
@@ -97,16 +93,5 @@ class Heartflow:
await self.subheartflow_manager.deactivate_all_subflows()
logger.info("[Heartflow] 所有任务和子心流已停止")
async def do_a_thinking(self):
"""执行一次主心流思考过程"""
schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True)
new_mind = await self.mind.do_a_thinking(
current_main_mind=self.current_mind, mai_state_info=self.current_state, schedule_info=schedule_info
)
self.past_mind.append(self.current_mind)
self.current_mind = new_mind
logger.info(f"麦麦的总体脑内状态更新为:{self.current_mind[:100]}...")
self.mind.update_subflows_with_main_mind(new_mind)
heartflow = Heartflow()

View File

@@ -3,7 +3,7 @@ from src.config.config import global_config
from typing import Optional, Dict
import traceback
from src.common.logger_manager import get_logger
from src.plugins.chat.message import MessageRecv
from src.chat.message_receive.message import MessageRecv
import math

View File

@@ -8,12 +8,12 @@ from typing import TYPE_CHECKING, Dict, List
from src.common.logger_manager import get_logger
# Need chat_manager to get stream names
from src.plugins.chat.chat_stream import chat_manager
from src.chat.message_receive.chat_stream import chat_manager
if TYPE_CHECKING:
from src.heart_flow.subheartflow_manager import SubHeartflowManager
from src.heart_flow.sub_heartflow import SubHeartflow
from src.heart_flow.heartflow import Heartflow # 导入 Heartflow 类型
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
from src.chat.heart_flow.sub_heartflow import SubHeartflow
from src.chat.heart_flow.heartflow import Heartflow # 导入 Heartflow 类型
logger = get_logger("interest")

View File

@@ -13,8 +13,8 @@ logger = get_logger("mai_state")
# The line `enable_unlimited_hfc_chat = False` is setting a configuration parameter that controls
# whether a specific debugging feature is enabled or not. When `enable_unlimited_hfc_chat` is set to
# `False`, it means that the debugging feature for unlimited focused chatting is disabled.
# enable_unlimited_hfc_chat = True # 调试用:无限专注聊天
enable_unlimited_hfc_chat = False
enable_unlimited_hfc_chat = True # 调试用:无限专注聊天
# enable_unlimited_hfc_chat = False
prevent_offline_state = True
# 目前默认不启用OFFLINE状态
@@ -82,7 +82,7 @@ class MaiState(enum.Enum):
class MaiStateInfo:
def __init__(self):
self.mai_status: MaiState = MaiState.OFFLINE
self.mai_status: MaiState = MaiState.NORMAL_CHAT # 初始状态改为 NORMAL_CHAT
self.mai_status_history: List[Tuple[MaiState, float]] = [] # 历史状态,包含 状态,时间戳
self.last_status_change_time: float = time.time() # 状态最后改变时间
self.last_min_check_time: float = time.time() # 上次1分钟规则检查时间
@@ -141,24 +141,18 @@ class MaiStateManager:
def check_and_decide_next_state(current_state_info: MaiStateInfo) -> Optional[MaiState]:
"""
根据当前状态和规则检查是否需要转换状态并决定下一个状态
Args:
current_state_info: 当前的 MaiStateInfo 实例
Returns:
Optional[MaiState]: 如果需要转换返回目标 MaiState否则返回 None
"""
current_time = time.time()
current_status = current_state_info.mai_status
time_in_current_status = current_time - current_state_info.last_status_change_time
time_since_last_min_check = current_time - current_state_info.last_min_check_time
_time_since_last_min_check = current_time - current_state_info.last_min_check_time
next_state: Optional[MaiState] = None
# 辅助函数:根据 prevent_offline_state 标志调整目标状态
def _resolve_offline(candidate_state: MaiState) -> MaiState:
if prevent_offline_state and candidate_state == MaiState.OFFLINE:
logger.debug("阻止进入 OFFLINE改为 PEEKING")
return MaiState.PEEKING
# 现在不再切换到OFFLINE直接返回当前状态
if candidate_state == MaiState.OFFLINE:
return current_status
return candidate_state
if current_status == MaiState.OFFLINE:
@@ -170,16 +164,16 @@ class MaiStateManager:
elif current_status == MaiState.FOCUSED_CHAT:
logger.info("当前在[专心看手机]思考要不要继续聊下去......")
# 1. 麦麦每分钟都有概率离线
if time_since_last_min_check >= 60:
if current_status != MaiState.OFFLINE:
if random.random() < 0.03: # 3% 概率切换到 OFFLINE
potential_next = MaiState.OFFLINE
resolved_next = _resolve_offline(potential_next)
logger.debug(f"概率触发下线resolve 为 {resolved_next.value}")
# 只有当解析后的状态与当前状态不同时才设置 next_state
if resolved_next != current_status:
next_state = resolved_next
# 1. 移除每分钟概率切换到OFFLINE的逻辑
# if time_since_last_min_check >= 60:
# if current_status != MaiState.OFFLINE:
# if random.random() < 0.03: # 3% 概率切换到 OFFLINE
# potential_next = MaiState.OFFLINE
# resolved_next = _resolve_offline(potential_next)
# logger.debug(f"概率触发下线resolve 为 {resolved_next.value}")
# # 只有当解析后的状态与当前状态不同时才设置 next_state
# if resolved_next != current_status:
# next_state = resolved_next
# 2. 状态持续时间规则 (只有在规则1没有触发状态改变时才检查)
if next_state is None:
@@ -189,30 +183,26 @@ class MaiStateManager:
rule_id = ""
if current_status == MaiState.OFFLINE:
# 注意:即使 prevent_offline_state=True也可能从初始的 OFFLINE 状态启动
if time_in_current_status >= 60:
time_limit_exceeded = True
rule_id = "2.1 (From OFFLINE)"
weights = [30, 30, 20, 20]
choices_list = [MaiState.PEEKING, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT, MaiState.OFFLINE]
# OFFLINE 状态不再自动切换,直接返回 None
return None
elif current_status == MaiState.PEEKING:
if time_in_current_status >= 600: # PEEKING 最多持续 600 秒
time_limit_exceeded = True
rule_id = "2.2 (From PEEKING)"
weights = [70, 20, 10]
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT]
weights = [50, 50]
choices_list = [MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT]
elif current_status == MaiState.NORMAL_CHAT:
if time_in_current_status >= 300: # NORMAL_CHAT 最多持续 300 秒
time_limit_exceeded = True
rule_id = "2.3 (From NORMAL_CHAT)"
weights = [50, 50]
choices_list = [MaiState.OFFLINE, MaiState.FOCUSED_CHAT]
choices_list = [MaiState.PEEKING, MaiState.FOCUSED_CHAT]
elif current_status == MaiState.FOCUSED_CHAT:
if time_in_current_status >= 600: # FOCUSED_CHAT 最多持续 600 秒
time_limit_exceeded = True
rule_id = "2.4 (From FOCUSED_CHAT)"
weights = [80, 20]
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT]
weights = [50, 50]
choices_list = [MaiState.NORMAL_CHAT, MaiState.PEEKING]
if time_limit_exceeded:
next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0]
@@ -232,14 +222,5 @@ class MaiStateManager:
# 如果决定了下一个状态,且这个状态与当前状态不同,则返回下一个状态
if next_state is not None and next_state != current_status:
return next_state
# 如果决定保持 OFFLINE (next_state == MaiState.OFFLINE) 且当前也是 OFFLINE
# 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器。
# 注意:这个分支只有在 prevent_offline_state = False 时才可能被触发。
elif next_state == MaiState.OFFLINE and current_status == MaiState.OFFLINE and time_in_current_status >= 60:
logger.debug("决定保持 OFFLINE (持续时间规则),返回 OFFLINE 以提示重置计时器。")
return MaiState.OFFLINE # Return OFFLINE to signal caller that timer reset might be needed
else:
# 1. next_state is None (没有触发任何转换规则)
# 2. next_state is not None 但等于 current_status (例如规则1想切OFFLINE但被resolve成PEEKING而当前已经是PEEKING)
# 3. next_state is OFFLINE, current is OFFLINE, 但不是因为时间规则触发 (例如初始状态还没到60秒)
return None # 没有状态转换发生或无需重置计时器

View File

@@ -1,28 +1,27 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.plugins.models.utils_model import LLMRequest
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.common.logger_manager import get_logger
import traceback
from src.plugins.utils.chat_message_builder import (
from src.chat.utils.chat_message_builder import (
get_raw_msg_before_timestamp_with_chat,
build_readable_messages,
get_raw_msg_by_timestamp_with_chat,
num_new_messages_since,
get_person_id_list,
)
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.prompt_builder import global_prompt_manager
from typing import Optional
import difflib
from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入
from src.chat.message_receive.message import MessageRecv # 添加 MessageRecv 导入
from src.chat.heart_flow.observation.observation import Observation
from src.common.logger_manager import get_logger
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.utils.prompt_builder import Prompt
# Import the new utility function
from .utils_chat import get_chat_type_and_target_info
logger = get_logger("observation")
logger = get_logger(__name__)
# --- Define Prompt Templates for Chat Summary ---
Prompt(
"""这是qq群聊的聊天记录请总结以下聊天记录的主题
{chat_logs}
@@ -39,22 +38,10 @@ Prompt(
# --- End Prompt Template Definition ---
# 所有观察的基类
class Observation:
def __init__(self, observe_type, observe_id):
self.observe_info = ""
self.observe_type = observe_type
self.observe_id = observe_id
self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
async def observe(self):
pass
# 聊天观察
class ChattingObservation(Observation):
def __init__(self, chat_id):
super().__init__("chat", chat_id)
super().__init__(chat_id)
self.chat_id = chat_id
# --- Initialize attributes (defaults) ---
@@ -74,26 +61,25 @@ class ChattingObservation(Observation):
self.max_mid_memory_len = global_config.compress_length_limit
self.mid_memory_info = ""
self.person_list = []
self.oldest_messages = []
self.oldest_messages_str = ""
self.compressor_prompt = ""
self.llm_summary = LLMRequest(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
async def initialize(self):
# --- Use utility function to determine chat type and fetch info ---
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
# logger.debug(f"is_group_chat: {self.is_group_chat}")
# logger.debug(f"chat_target_info: {self.chat_target_info}")
# --- End using utility function ---
# Fetch initial messages (existing logic)
logger.debug(f"初始化observation: self.is_group_chat: {self.is_group_chat}")
logger.debug(f"初始化observation: self.chat_target_info: {self.chat_target_info}")
initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10)
self.talking_message = initial_messages
self.talking_message_str = await build_readable_messages(self.talking_message)
# 进行一次观察 返回观察结果observe_info
def get_observe_info(self, ids=None):
mid_memory_str = ""
if ids:
mid_memory_str = ""
for id in ids:
print(f"id{id}")
try:
@@ -114,7 +100,74 @@ class ChattingObservation(Observation):
return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
else:
return self.talking_message_str
mid_memory_str = "之前的聊天内容:\n"
for mid_memory in self.mid_memorys:
mid_memory_str += f"{mid_memory['theme']}\n"
return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
def serch_message_by_text(self, text: str) -> Optional[MessageRecv]:
"""
根据回复的纯文本
1. 在talking_message中查找最新的最匹配的消息
2. 如果找到则返回消息
"""
msg_list = []
find_msg = None
reverse_talking_message = list(reversed(self.talking_message))
for message in reverse_talking_message:
if message["processed_plain_text"] == text:
find_msg = message
logger.debug(f"找到的锚定消息find_msg: {find_msg}")
break
else:
similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
msg_list.append({"message": message, "similarity": similarity})
logger.debug(f"对锚定消息检查message: {message['processed_plain_text']},similarity: {similarity}")
if not find_msg:
if msg_list:
msg_list.sort(key=lambda x: x["similarity"], reverse=True)
if msg_list[0]["similarity"] >= 0.5: # 只返回相似度大于等于0.5的消息
find_msg = msg_list[0]["message"]
else:
logger.debug("没有找到锚定消息,相似度低")
return None
else:
logger.debug("没有找到锚定消息,没有消息捕获")
return None
# logger.debug(f"找到的锚定消息find_msg: {find_msg}")
group_info = find_msg.get("chat_info", {}).get("group_info")
user_info = find_msg.get("chat_info", {}).get("user_info")
content_format = ""
accept_format = ""
template_items = {}
format_info = {"content_format": content_format, "accept_format": accept_format}
template_info = {
"template_items": template_items,
}
message_info = {
"platform": find_msg.get("platform"),
"message_id": find_msg.get("message_id"),
"time": find_msg.get("time"),
"group_info": group_info,
"user_info": user_info,
"additional_config": find_msg.get("additional_config"),
"format_info": format_info,
"template_info": template_info,
}
message_dict = {
"message_info": message_info,
"raw_message": find_msg.get("processed_plain_text"),
"detailed_plain_text": find_msg.get("processed_plain_text"),
"processed_plain_text": find_msg.get("processed_plain_text"),
}
find_rec_msg = MessageRecv(message_dict)
logger.debug(f"锚定消息处理后find_rec_msg: {find_rec_msg}")
return find_rec_msg
async def observe(self):
# 自上一次观察的新消息
@@ -174,40 +227,10 @@ class ChattingObservation(Observation):
logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}")
# prompt remains None
summary = "没有主题的闲聊" # 默认值
if prompt: # Check if prompt was built successfully
try:
summary_result, _, _ = await self.llm_summary.generate_response(prompt)
if summary_result: # 确保结果不为空
summary = summary_result
except Exception as e:
logger.error(f"总结主题失败 for chat {self.chat_id}: {e}")
# 保留默认总结 "没有主题的闲聊"
else:
logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}")
mid_memory = {
"id": str(int(datetime.now().timestamp())),
"theme": summary,
"messages": oldest_messages, # 存储原始消息对象
"readable_messages": oldest_messages_str,
# "timestamps": oldest_timestamps,
"chat_id": self.chat_id,
"created_at": datetime.now().timestamp(),
}
self.mid_memorys.append(mid_memory)
if len(self.mid_memorys) > self.max_mid_memory_len:
self.mid_memorys.pop(0) # 移除最旧的
mid_memory_str = "之前聊天的内容概述是:\n"
for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分
time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
mid_memory_str += (
f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}){mid_memory_item['theme']}\n"
)
self.mid_memory_info = mid_memory_str
self.compressor_prompt = prompt
self.oldest_messages = oldest_messages
self.oldest_messages_str = oldest_messages_str
self.talking_message_str = await build_readable_messages(
messages=self.talking_message,
@@ -229,70 +252,6 @@ class ChattingObservation(Observation):
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
)
async def find_best_matching_message(self, search_str: str, min_similarity: float = 0.6) -> Optional[MessageRecv]:
"""
talking_message 中查找与 search_str 最匹配的消息
Args:
search_str: 要搜索的字符串
min_similarity: 要求的最低相似度0到1之间
Returns:
匹配的 MessageRecv 实例如果找不到则返回 None
"""
best_match_score = -1.0
best_match_dict = None
if not self.talking_message:
logger.debug(f"Chat {self.chat_id}: talking_message is empty, cannot find match for '{search_str}'")
return None
for message_dict in self.talking_message:
try:
# 临时创建 MessageRecv 以处理文本
temp_msg = MessageRecv(message_dict)
await temp_msg.process() # 处理消息以获取 processed_plain_text
current_text = temp_msg.processed_plain_text
if not current_text: # 跳过没有文本内容的消息
continue
# 计算相似度
matcher = difflib.SequenceMatcher(None, search_str, current_text)
score = matcher.ratio()
# logger.debug(f"Comparing '{search_str}' with '{current_text}', score: {score}") # 可选:用于调试
if score > best_match_score:
best_match_score = score
best_match_dict = message_dict
except Exception as e:
logger.error(f"Error processing message for matching in chat {self.chat_id}: {e}", exc_info=True)
continue # 继续处理下一条消息
if best_match_dict is not None and best_match_score >= min_similarity:
logger.debug(f"Found best match for '{search_str}' with score {best_match_score:.2f}")
try:
final_msg = MessageRecv(best_match_dict)
await final_msg.process()
# 确保 MessageRecv 实例有关联的 chat_stream
if hasattr(self, "chat_stream"):
final_msg.update_chat_stream(self.chat_stream)
else:
logger.warning(
f"ChattingObservation instance for chat {self.chat_id} does not have a chat_stream attribute set."
)
return final_msg
except Exception as e:
logger.error(f"Error creating final MessageRecv for chat {self.chat_id}: {e}", exc_info=True)
return None
else:
logger.debug(
f"No suitable match found for '{search_str}' in chat {self.chat_id} (best score: {best_match_score:.2f}, threshold: {min_similarity})"
)
return None
async def has_new_messages_since(self, timestamp: float) -> bool:
"""检查指定时间戳之后是否有新消息"""
count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp)

View File

@@ -0,0 +1,82 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger_manager import get_logger
from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from typing import List
# Import the new utility function
logger = get_logger("observation")
# 所有观察的基类
# Observation over the focused-chat (HFC) execution loop history.
class HFCloopObservation:
    """Tracks finished HeartFChatting cycles and summarizes recent reply behavior.

    observe() renders a short natural-language note (in self.observe_info)
    warning the bot when it has replied several times in a row, plus how long
    ago the last cycle ran.
    """

    def __init__(self, observe_id):
        self.observe_info = ""  # latest rendered summary, refreshed by observe()
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp()  # creation time of this observation
        self.history_loop: List[CycleDetail] = []  # recorded cycles, oldest first

    def get_observe_info(self):
        """Return the summary text produced by the most recent observe() call."""
        return self.observe_info

    def add_loop_info(self, loop_info: CycleDetail):
        """Record one finished cycle.

        Uses logger.debug instead of the leftover print() calls so debug
        output goes through the logging pipeline.
        """
        logger.debug(
            f"add loop info: action_taken={loop_info.action_taken}, "
            f"action_type={loop_info.action_type}, response_info={loop_info.response_info}"
        )
        self.history_loop.append(loop_info)

    async def observe(self):
        """Rebuild self.observe_info from the most recent active cycles."""
        # Collect up to the 3 most recent cycles that actually took an action,
        # newest first.
        recent_active_cycles: List[CycleDetail] = []
        for cycle in reversed(self.history_loop):
            if cycle.action_taken:
                recent_active_cycles.append(cycle)
            if len(recent_active_cycles) == 3:
                break

        cycle_info_block = ""
        consecutive_text_replies = 0
        responses_for_prompt = []
        # Count how many of the most recent active cycles are consecutive
        # text replies (newest first); stop at the first non-reply action.
        for cycle in recent_active_cycles:
            if cycle.action_type == "reply":
                consecutive_text_replies += 1
                # Fall back to a placeholder when the reply text is missing.
                response_text = cycle.response_info.get("response_text", "[空回复]")
                responses_for_prompt.append(response_text)
            else:
                break

        # Build the warning based on how many replies in a row were sent.
        # responses_for_prompt is ordered newest -> oldest.
        if consecutive_text_replies >= 3:
            cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
        elif consecutive_text_replies == 2:
            cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'
        elif consecutive_text_replies == 1:
            cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}"'

        # Wrap the block for readability; emit a bare newline when there is
        # nothing to report.
        if cycle_info_block:
            cycle_info_block = f"\n你最近的回复\n{cycle_info_block}\n"
        else:
            cycle_info_block = "\n"

        # Append how long ago the most recently added cycle ran.
        if self.history_loop:
            last_loop = self.history_loop[-1]
            start_time = last_loop.start_time
            end_time = last_loop.end_time
            if start_time is not None and end_time is not None:
                # end_time - start_time is in seconds; the previous message
                # mislabeled this value as minutes ("分钟").
                time_diff = int(end_time - start_time)
                cycle_info_block += f"\n距离你上一次阅读消息已经过去了{time_diff}秒\n"
            else:
                cycle_info_block += "\n无法获取上一次阅读消息的时间\n"

        self.observe_info = cycle_info_block

View File

@@ -0,0 +1,55 @@
from src.chat.heart_flow.observation.observation import Observation
from datetime import datetime
from src.common.logger_manager import get_logger
import traceback
# Import the new utility function
from src.chat.memory_system.Hippocampus import HippocampusManager
import jieba
from typing import List
logger = get_logger("memory")
class MemoryObservation(Observation):
    """Observation that surfaces long-term memories related to the chat context.

    observe() extracts keywords from self.context and asks the hippocampus
    for related memories, which accumulate in self.running_memory.
    """

    def __init__(self, observe_id):
        super().__init__(observe_id)
        self.observe_info: str = ""  # rendered "topic:content" summary
        self.context: str = ""  # chat text used as the retrieval query
        self.running_memory: List[dict] = []  # dicts: {topic, content, timestamp}

    def get_observe_info(self):
        """Render the currently held memories, one "topic:content" line each.

        Rebuilds the string from scratch on every call; the previous
        implementation kept appending to self.observe_info, duplicating all
        entries each time this was called.
        """
        self.observe_info = "".join(f"{memory['topic']}:{memory['content']}\n" for memory in self.running_memory)
        return self.observe_info

    async def observe(self):
        """Fetch memories related to the current context and store them."""
        try:
            # Extract candidate keywords from the chat context.
            chat_words = set(jieba.cut(self.context))
            # Drop single-character tokens (stop-word-like noise).
            keywords = [word for word in chat_words if len(word) > 1]
            # Deduplicate and cap the number of query terms.
            keywords = list(set(keywords))[:5]
            logger.debug(f"取的关键词: {keywords}")
            # Query the memory system for memories related to the keywords.
            related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
                valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
            )
            logger.debug(f"获取到的记忆: {related_memory}")
            if related_memory:
                for topic, memory in related_memory:
                    # NOTE(review): running_memory grows without bound across
                    # observe() calls — confirm whether a cap is intended.
                    self.running_memory.append(
                        {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat()}
                    )
                    logger.debug(f"添加新记忆: {topic} - {memory}")
        except Exception as e:
            # Best-effort: memory retrieval failures must not break the chat loop.
            logger.error(f"观察 记忆时出错: {e}")
            logger.error(traceback.format_exc())

View File

@@ -0,0 +1,17 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger_manager import get_logger
logger = get_logger("observation")
# 所有观察的基类
# Base class for all observations of the outside world
# (a chat on any platform, or any other medium).
class Observation:
    """Common state shared by every concrete observation type."""

    def __init__(self, observe_id):
        # Identifier of the thing being observed (e.g. a chat id).
        self.observe_id = observe_id
        # Human-readable result of the latest observation; empty until observe() runs.
        self.observe_info = ""
        # Timestamp of the most recent observation, seeded with "now".
        self.last_observe_time = datetime.now().timestamp()

    async def observe(self):
        """Refresh the observation; concrete subclasses override this."""
        pass

View File

@@ -0,0 +1,34 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger_manager import get_logger
# Import the new utility function
logger = get_logger("observation")
# 所有观察的基类
# Holds short-lived structured info items, each expiring after a TTL
# measured in observe() cycles.
class WorkingObservation:
    """Working-memory style store: items age out after "ttl" observe() calls."""

    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp()  # creation time
        self.history_loop = []
        self.structured_info = []  # list of dicts; each is expected to carry an int "ttl"

    def get_observe_info(self):
        """Return the currently alive structured info items."""
        return self.structured_info

    def add_structured_info(self, structured_info: dict):
        """Register a new structured info item (expected to contain a positive "ttl")."""
        self.structured_info.append(structured_info)

    async def observe(self):
        """Age every item by one cycle, dropping those whose TTL has run out."""
        observed_structured_infos = []
        for structured_info in self.structured_info:
            # Default a missing "ttl" to 0 so malformed items simply expire;
            # the previous `.get("ttl") > 0` raised TypeError (None > 0).
            if structured_info.get("ttl", 0) > 0:
                structured_info["ttl"] -= 1
                observed_structured_infos.append(structured_info)
                logger.debug(f"观察到结构化信息仍旧在: {structured_info}")
        self.structured_info = observed_structured_infos

View File

@@ -1,16 +1,16 @@
from .observation import Observation, ChattingObservation
from .observation.observation import Observation
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
import asyncio
import time
from typing import Optional, List, Dict, Tuple, Callable, Coroutine
import traceback
from src.common.logger_manager import get_logger
from src.plugins.chat.message import MessageRecv
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.heartFC_chat.heartFC_chat import HeartFChatting
from src.plugins.heartFC_chat.normal_chat import NormalChat
from src.heart_flow.mai_state_manager import MaiStateInfo
from src.heart_flow.chat_state_info import ChatState, ChatStateInfo
from src.heart_flow.sub_mind import SubMind
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.focus_chat.heartFC_chat import HeartFChatting
from src.chat.normal_chat.normal_chat import NormalChat
from src.chat.heart_flow.mai_state_manager import MaiStateInfo
from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo
from .utils_chat import get_chat_type_and_target_info
from .interest_chatting import InterestChatting
@@ -58,7 +58,7 @@ class SubHeartflow:
self.should_stop = False # 停止标志
self.task: Optional[asyncio.Task] = None # 后台任务
# 随便水群 normal_chat 和 认真水群 heartFC_chat 实例
# 随便水群 normal_chat 和 认真水群 focus_chat 实例
# CHAT模式激活 随便水群 FOCUS模式激活 认真水群
self.heart_fc_instance: Optional[HeartFChatting] = None # 该sub_heartflow的HeartFChatting实例
self.normal_chat_instance: Optional[NormalChat] = None # 该sub_heartflow的NormalChat实例
@@ -68,11 +68,6 @@ class SubHeartflow:
self.observations: List[ChattingObservation] = [] # 观察列表
# self.running_knowledges = [] # 运行中的知识,待完善
# LLM模型配置负责进行思考
self.sub_mind = SubMind(
subheartflow_id=self.subheartflow_id, chat_state=self.chat_state, observations=self.observations
)
# 日志前缀 - Moved determination to initialize
self.log_prefix = str(subheartflow_id) # Initial default prefix
@@ -186,7 +181,6 @@ class SubHeartflow:
# 创建 HeartFChatting 实例,并传递 从构造函数传入的 回调函数
self.heart_fc_instance = HeartFChatting(
chat_id=self.subheartflow_id,
sub_mind=self.sub_mind,
observations=self.observations, # 传递所有观察者
on_consecutive_no_reply_callback=self.hfc_no_reply_callback, # <-- Use stored callback
)
@@ -288,9 +282,6 @@ class SubHeartflow:
logger.info(f"{self.log_prefix} 子心流后台任务已停止。")
def update_current_mind(self, response):
self.sub_mind.update_current_mind(response)
def add_observation(self, observation: Observation):
for existing_obs in self.observations:
if existing_obs.observe_id == observation.observe_id:
@@ -304,9 +295,6 @@ class SubHeartflow:
def get_all_observations(self) -> list[Observation]:
return self.observations
def clear_observations(self):
self.observations.clear()
def _get_primary_observation(self) -> Optional[ChattingObservation]:
if self.observations and isinstance(self.observations[0], ChattingObservation):
return self.observations[0]
@@ -332,7 +320,6 @@ class SubHeartflow:
interest_state = await self.get_interest_state()
return {
"interest_state": interest_state,
"current_mind": self.sub_mind.current_mind,
"chat_state": self.chat_state.chat_status.value,
"chat_state_changed_time": self.chat_state_changed_time,
}

View File

@@ -9,15 +9,15 @@ import functools # <-- 新增导入
from src.common.logger_manager import get_logger
# 导入聊天流管理模块
from src.plugins.chat.chat_stream import chat_manager
from src.chat.message_receive.chat_stream import chat_manager
# 导入心流相关类
from src.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.heart_flow.mai_state_manager import MaiStateInfo
from .observation import ChattingObservation
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.chat.heart_flow.mai_state_manager import MaiStateInfo
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
# 导入LLM请求工具
from src.plugins.models.utils_model import LLMRequest
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.individuality.individuality import Individuality
import traceback

View File

@@ -1,8 +1,8 @@
import asyncio
from typing import Optional, Tuple, Dict
from src.common.logger_manager import get_logger
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.person_info.person_info import person_info_manager
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.person_info.person_info import person_info_manager
logger = get_logger("heartflow_utils")

View File

@@ -11,14 +11,14 @@ import networkx as nx
import numpy as np
from collections import Counter
from ...common.database import db
from ...plugins.models.utils_model import LLMRequest
from ...chat.models.utils_model import LLMRequest
from src.common.logger_manager import get_logger
from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器
from src.chat.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器
from ..utils.chat_message_builder import (
get_raw_msg_by_timestamp,
build_readable_messages,
) # 导入 build_readable_messages
from ..chat.utils import translate_timestamp_to_human_readable
from ..utils.utils import translate_timestamp_to_human_readable
from .memory_config import MemoryConfig
from rich.traceback import install
@@ -499,7 +499,7 @@ class Hippocampus:
for topic, memory_items, _ in unique_memories:
memory = memory_items[0] # 因为每个topic只有一条记忆
result.append((topic, memory))
logger.info(f"选中记忆: {memory} (来自节点: {topic})")
logger.debug(f"选中记忆: {memory} (来自节点: {topic})")
return result
@@ -665,7 +665,7 @@ class Hippocampus:
for topic, memory_items, _ in unique_memories:
memory = memory_items[0] # 因为每个topic只有一条记忆
result.append((topic, memory))
logger.info(f"选中记忆: {memory} (来自节点: {topic})")
logger.debug(f"选中记忆: {memory} (来自节点: {topic})")
return result

View File

@@ -6,7 +6,7 @@ import os
# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.chat.memory_system.Hippocampus import HippocampusManager
from src.config.config import global_config
from rich.traceback import install

View File

@@ -2,7 +2,7 @@ from ..emoji_system.emoji_manager import emoji_manager
from ..person_info.relationship_manager import relationship_manager
from .chat_stream import chat_manager
from .message_sender import message_manager
from ..storage.storage import MessageStorage
from .storage import MessageStorage
__all__ = [

View File

@@ -3,13 +3,13 @@ from typing import Dict, Any
from src.common.logger_manager import get_logger
from src.manager.mood_manager import mood_manager # 导入情绪管理器
from .chat_stream import chat_manager
from .message import MessageRecv
from .only_message_process import MessageProcessor
from ..PFC.pfc_manager import PFCManager
from ..heartFC_chat.heartflow_processor import HeartFCProcessor
from ..utils.prompt_builder import Prompt, global_prompt_manager
from ...config.config import global_config
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.message_receive.message import MessageRecv
from src.experimental.only_message_process import MessageProcessor
from src.experimental.PFC.pfc_manager import PFCManager
from src.chat.focus_chat.heartflow_processor import HeartFCProcessor
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.config.config import global_config
# 定义日志配置

View File

@@ -7,7 +7,7 @@ import urllib3
from src.common.logger_manager import get_logger
from .chat_stream import ChatStream
from .utils_image import image_manager
from ..utils.utils_image import image_manager
from maim_message import Seg, UserInfo, BaseMessageInfo, MessageBase
from rich.traceback import install
@@ -100,6 +100,7 @@ class MessageRecv(Message):
Args:
message_dict: MessageCQ序列化后的字典
"""
# print(f"message_dict: {message_dict}")
self.message_info = BaseMessageInfo.from_dict(message_dict.get("message_info", {}))
self.message_segment = Seg.from_dict(message_dict.get("message_segment", {}))

View File

@@ -3,14 +3,14 @@ import asyncio
import time
from asyncio import Task
from typing import Union
from src.plugins.message.api import global_api
from src.common.message.api import global_api
# from ...common.database import db # 数据库依赖似乎不需要了,注释掉
from .message import MessageSending, MessageThinking, MessageSet
from ..storage.storage import MessageStorage
from .storage import MessageStorage
from ...config.config import global_config
from .utils import truncate_message, calculate_typing_time, count_messages_between
from ..utils.utils import truncate_message, calculate_typing_time, count_messages_between
from src.common.logger_manager import get_logger
from rich.traceback import install
@@ -212,7 +212,7 @@ class MessageManager:
_ = message.update_thinking_time() # 更新思考时间
thinking_start_time = message.thinking_start_time
now_time = time.time()
logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}")
# logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}")
thinking_messages_count, thinking_messages_length = count_messages_between(
start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id
)
@@ -236,7 +236,7 @@ class MessageManager:
await message.process() # 预处理消息内容
logger.debug(f"{message}")
# logger.debug(f"{message}")
# 使用全局 message_sender 实例
await send_message(message)

View File

@@ -2,8 +2,8 @@ import re
from typing import Union
from ...common.database import db
from ..chat.message import MessageSending, MessageRecv
from ..chat.chat_stream import ChatStream
from .message import MessageSending, MessageRecv
from .chat_stream import ChatStream
from src.common.logger import get_module_logger
logger = get_module_logger("message_storage")

View File

@@ -157,7 +157,7 @@ class LLMRequest:
completion_tokens: 输出token数
total_tokens: 总token数
user_id: 用户ID默认为system
request_type: 请求类型(chat/embedding/image/topic/schedule)
request_type: 请求类型
endpoint: API端点
"""
# 如果 request_type 为 None则使用实例变量中的值

View File

@@ -8,19 +8,19 @@ from typing import List, Optional # 导入 Optional
from maim_message import UserInfo, Seg
from src.common.logger_manager import get_logger
from src.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.manager.mood_manager import mood_manager
from src.plugins.chat.chat_stream import ChatStream, chat_manager
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculator import Timer
from src.chat.message_receive.chat_stream import ChatStream, chat_manager
from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.utils.info_catcher import info_catcher_manager
from src.chat.utils.timer_calculator import Timer
from .normal_chat_generator import NormalChatGenerator
from ..chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
from ..chat.message_sender import message_manager
from ..chat.utils_image import image_path_to_base64
from ..emoji_system.emoji_manager import emoji_manager
from ..willing.willing_manager import willing_manager
from ...config.config import global_config
from ..message_receive.message import MessageSending, MessageRecv, MessageThinking, MessageSet
from src.chat.message_receive.message_sender import message_manager
from src.chat.utils.utils_image import image_path_to_base64
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.normal_chat.willing.willing_manager import willing_manager
from src.config.config import global_config
logger = get_logger("chat")
@@ -353,7 +353,8 @@ class NormalChat:
async def _process_initial_interest_messages(self):
"""处理启动时存在于 interest_dict 中的高兴趣消息。"""
if not self.interest_dict:
return # 如果 interest_dict 为 None或空直接返回
return # 如果 interest_dict 为 None 或空,直接返回
items_to_process = list(self.interest_dict.items())
if not items_to_process:
return # 没有初始消息,直接返回

View File

@@ -2,12 +2,12 @@ from typing import List, Optional, Tuple, Union
import random
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from ..chat.message import MessageThinking
from .heartflow_prompt_builder import prompt_builder
from ..chat.utils import process_llm_response
from ..utils.timer_calculator import Timer
from ..message_receive.message import MessageThinking
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
from src.chat.utils.utils import process_llm_response
from src.chat.utils.timer_calculator import Timer
from src.common.logger_manager import get_logger
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.chat.utils.info_catcher import info_catcher_manager
logger = get_logger("llm")

View File

@@ -1,9 +1,9 @@
from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger
from dataclasses import dataclass
from ...config.config import global_config, BotConfig
from ..chat.chat_stream import ChatStream, GroupInfo
from ..chat.message import MessageRecv
from ..person_info.person_info import person_info_manager, PersonInfoManager
from src.config.config import global_config, BotConfig
from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
from src.chat.message_receive.message import MessageRecv
from src.chat.person_info.person_info import person_info_manager, PersonInfoManager
from abc import ABC, abstractmethod
import importlib
from typing import Dict, Optional

View File

@@ -6,7 +6,7 @@ from typing import Any, Callable, Dict
import datetime
import asyncio
import numpy as np
from src.plugins.models.utils_model import LLMRequest
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.individuality.individuality import Individuality

View File

@@ -1,5 +1,5 @@
from src.common.logger_manager import get_logger
from ..chat.chat_stream import ChatStream
from ..message_receive.chat_stream import ChatStream
import math
from bson.decimal128 import Decimal128
from .person_info import person_info_manager
@@ -94,13 +94,23 @@ class RelationshipManager:
return False
@staticmethod
async def first_knowing_some_one(platform, user_id, user_nickname, user_cardname, user_avatar):
async def first_knowing_some_one(
platform: str, user_id: str, user_nickname: str, user_cardname: str, user_avatar: str
):
"""判断是否认识某人"""
person_id = person_info_manager.get_person_id(platform, user_id)
await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
# await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
# await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
data = {
"platform": platform,
"user_id": user_id,
"nickname": user_nickname,
"konw_time": int(time.time()),
}
await person_info_manager.update_one_field(
person_id=person_id, field_name="nickname", value=user_nickname, data=data
)
await person_info_manager.qv_person_name(
person_id=person_id, user_nickname=user_nickname, user_cardname=user_cardname, user_avatar=user_avatar
)
async def calculate_update_relationship_value(self, user_info: UserInfo, platform: str, label: str, stance: str):
"""计算并变更关系值

View File

@@ -1,22 +1,11 @@
from src.config.config import global_config
# 不再直接使用 db
# from src.common.database import db
# 移除 logger 和 traceback因为错误处理移至 repository
# from src.common.logger import get_module_logger
# import traceback
from typing import List, Dict, Any, Tuple # 确保类型提示被导入
import time # 导入 time 模块以获取当前时间
# 导入新的 repository 函数
import random
import re
from src.common.message_repository import find_messages, count_messages
# 导入 PersonInfoManager 和时间转换工具
from src.plugins.person_info.person_info import person_info_manager
from src.plugins.chat.utils import translate_timestamp_to_human_readable
# 不再需要文件级别的 logger
# logger = get_module_logger(__name__)
from src.chat.person_info.person_info import person_info_manager
from src.chat.utils.utils import translate_timestamp_to_human_readable
def get_raw_msg_by_timestamp(
@@ -69,6 +58,23 @@ def get_raw_msg_by_timestamp_with_chat_users(
return find_messages(message_filter=filter_query, sort=sort_order, limit=limit, limit_mode=limit_mode)
def get_raw_msg_by_timestamp_random(
timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
) -> List[Dict[str, Any]]:
"""
先在范围时间戳内随机选择一条消息取得消息的chat_id然后根据chat_id获取该聊天在指定时间戳范围内的消息
"""
# 获取所有消息只取chat_id字段
all_msgs = get_raw_msg_by_timestamp(timestamp_start, timestamp_end)
if not all_msgs:
return []
# 随机选一条
msg = random.choice(all_msgs)
chat_id = msg["chat_id"]
# 用 chat_id 获取该聊天在指定时间戳范围内的消息
return get_raw_msg_by_timestamp_with_chat(chat_id, timestamp_start, timestamp_end, limit, limit_mode)
def get_raw_msg_by_timestamp_with_users(
timestamp_start: float, timestamp_end: float, person_ids: list, limit: int = 0, limit_mode: str = "latest"
) -> List[Dict[str, Any]]:
@@ -197,7 +203,45 @@ async def _build_readable_messages_internal(
else:
person_name = "某人"
message_details_raw.append((timestamp, person_name, content))
# 检查是否有 回复<aaa:bbb> 字段
reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
match = re.search(reply_pattern, content)
if match:
aaa = match.group(1)
bbb = match.group(2)
reply_person_id = person_info_manager.get_person_id(platform, bbb)
reply_person_name = await person_info_manager.get_value(reply_person_id, "person_name")
if not reply_person_name:
reply_person_name = aaa
# 在内容前加上回复信息
content = re.sub(reply_pattern, f"回复 {reply_person_name}", content, count=1)
# 检查是否有 @<aaa:bbb> 字段 @<{member_info.get('nickname')}:{member_info.get('user_id')}>
at_pattern = r"@<([^:<>]+):([^:<>]+)>"
at_matches = list(re.finditer(at_pattern, content))
if at_matches:
new_content = ""
last_end = 0
for m in at_matches:
new_content += content[last_end : m.start()]
aaa = m.group(1)
bbb = m.group(2)
at_person_id = person_info_manager.get_person_id(platform, bbb)
at_person_name = await person_info_manager.get_value(at_person_id, "person_name")
if not at_person_name:
at_person_name = aaa
new_content += f"@{at_person_name}"
last_end = m.end()
new_content += content[last_end:]
content = new_content
target_str = "这是QQ的一个功能用于提及某人但没那么明显"
if target_str in content:
if random.random() < 0.6:
content = content.replace(target_str, "")
if content != "":
message_details_raw.append((timestamp, person_name, content))
if not message_details_raw:
return "", []

View File

@@ -1,5 +1,5 @@
from src.config.config import global_config
from src.plugins.chat.message import MessageRecv, MessageSending, Message
from src.chat.message_receive.message import MessageRecv, MessageSending, Message
from src.common.database import db
import time
import traceback

View File

@@ -512,46 +512,54 @@ class StatisticOutputTask(AsyncTask):
# format总在线时间
# 按模型分类统计
model_rows = "\n".join([
f"<tr>"
f"<td>{model_name}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[OUT_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[COST_BY_MODEL][model_name]:.4f} ¥</td>"
f"</tr>"
for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items())
])
model_rows = "\n".join(
[
f"<tr>"
f"<td>{model_name}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[OUT_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_MODEL][model_name]}</td>"
f"<td>{stat_data[COST_BY_MODEL][model_name]:.4f} ¥</td>"
f"</tr>"
for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items())
]
)
# 按请求类型分类统计
type_rows = "\n".join([
f"<tr>"
f"<td>{req_type}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[OUT_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[COST_BY_TYPE][req_type]:.4f} ¥</td>"
f"</tr>"
for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items())
])
type_rows = "\n".join(
[
f"<tr>"
f"<td>{req_type}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[OUT_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_TYPE][req_type]}</td>"
f"<td>{stat_data[COST_BY_TYPE][req_type]:.4f} ¥</td>"
f"</tr>"
for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items())
]
)
# 按用户分类统计
user_rows = "\n".join([
f"<tr>"
f"<td>{user_id}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[OUT_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[COST_BY_USER][user_id]:.4f} ¥</td>"
f"</tr>"
for user_id, count in sorted(stat_data[REQ_CNT_BY_USER].items())
])
user_rows = "\n".join(
[
f"<tr>"
f"<td>{user_id}</td>"
f"<td>{count}</td>"
f"<td>{stat_data[IN_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[OUT_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[TOTAL_TOK_BY_USER][user_id]}</td>"
f"<td>{stat_data[COST_BY_USER][user_id]:.4f} ¥</td>"
f"</tr>"
for user_id, count in sorted(stat_data[REQ_CNT_BY_USER].items())
]
)
# 聊天消息统计
chat_rows = "\n".join([
f"<tr><td>{self.name_mapping[chat_id][0]}</td><td>{count}</td></tr>"
for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items())
])
chat_rows = "\n".join(
[
f"<tr><td>{self.name_mapping[chat_id][0]}</td><td>{count}</td></tr>"
for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items())
]
)
# 生成HTML
return f"""
<div id=\"{div_id}\" class=\"tab-content\">

View File

@@ -10,9 +10,9 @@ from pymongo.errors import PyMongoError
from src.common.logger import get_module_logger
from src.manager.mood_manager import mood_manager
from .message import MessageRecv
from ..message_receive.message import MessageRecv
from ..models.utils_model import LLMRequest
from ..utils.typo_generator import ChineseTypoGenerator
from .typo_generator import ChineseTypoGenerator
from ...common.database import db
from ...config.config import global_config

View File

@@ -117,7 +117,7 @@ class ImageManager:
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
# logger.debug(f"缓存表情包描述: {cached_description}")
return f"[表达了{cached_description}]"
return f"[表情包,含义看起来是{cached_description}]"
# 调用AI获取描述
if image_format == "gif" or image_format == "GIF":
@@ -131,7 +131,7 @@ class ImageManager:
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
logger.warning(f"虽然生成了描述,但是找到缓存表情包描述: {cached_description}")
return f"[表达了{cached_description}]"
return f"[表情包,含义看起来是{cached_description}]"
# 根据配置决定是否保存图片
if global_config.save_emoji:

Some files were not shown because too many files have changed in this diff Show More