feat: Major refactor of HFC, adopting a new architecture
@@ -23,9 +23,9 @@ install(extra_lines=3)
logger = get_logger("config")

# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = False
mai_version_main = "0.6.3"
mai_version_fix = "fix-3"
is_test = True
mai_version_main = "0.6.4"
mai_version_fix = "snapshot-1"

if mai_version_fix:
if is_test:
@@ -7,7 +7,7 @@ import traceback
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import parse_text_timestamps
from src.plugins.chat.chat_stream import ChatStream
from src.heart_flow.observation import ChattingObservation
from src.heart_flow.chatting_observation import ChattingObservation

logger = get_logger("tool_use")
src/heart_flow/chatting_observation.py (new file, 269 lines)
@@ -0,0 +1,269 @@
|
||||
from datetime import datetime
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import traceback
|
||||
from src.plugins.utils.chat_message_builder import (
|
||||
get_raw_msg_before_timestamp_with_chat,
|
||||
build_readable_messages,
|
||||
get_raw_msg_by_timestamp_with_chat,
|
||||
num_new_messages_since,
|
||||
get_person_id_list,
|
||||
)
|
||||
from src.plugins.utils.prompt_builder import global_prompt_manager
|
||||
from typing import Optional
|
||||
import difflib
|
||||
from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入
|
||||
from src.heart_flow.observation import Observation
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
# 聊天观察
|
||||
class ChattingObservation(Observation):
|
||||
def __init__(self, chat_id):
|
||||
super().__init__(chat_id)
|
||||
self.chat_id = chat_id
|
||||
|
||||
# --- Initialize attributes (defaults) ---
|
||||
self.is_group_chat: bool = False
|
||||
self.chat_target_info: Optional[dict] = None
|
||||
# --- End Initialization ---
|
||||
|
||||
# --- Other attributes initialized in __init__ ---
|
||||
self.talking_message = []
|
||||
self.talking_message_str = ""
|
||||
self.talking_message_str_truncate = ""
|
||||
self.name = global_config.BOT_NICKNAME
|
||||
self.nick_name = global_config.BOT_ALIAS_NAMES
|
||||
self.max_now_obs_len = global_config.observation_context_size
|
||||
self.overlap_len = global_config.compressed_length
|
||||
self.mid_memorys = []
|
||||
self.max_mid_memory_len = global_config.compress_length_limit
|
||||
self.mid_memory_info = ""
|
||||
self.person_list = []
|
||||
self.llm_summary = LLMRequest(
|
||||
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
|
||||
)
|
||||
|
||||
async def initialize(self):
|
||||
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
|
||||
logger.debug(f"初始化observation: self.is_group_chat: {self.is_group_chat}")
|
||||
logger.debug(f"初始化observation: self.chat_target_info: {self.chat_target_info}")
|
||||
initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10)
|
||||
self.talking_message = initial_messages
|
||||
self.talking_message_str = await build_readable_messages(self.talking_message)
|
||||
|
||||
# 进行一次观察 返回观察结果observe_info
|
||||
def get_observe_info(self, ids=None):
|
||||
if ids:
|
||||
mid_memory_str = ""
|
||||
for id in ids:
|
||||
print(f"id:{id}")
|
||||
try:
|
||||
for mid_memory in self.mid_memorys:
|
||||
if mid_memory["id"] == id:
|
||||
mid_memory_by_id = mid_memory
|
||||
msg_str = ""
|
||||
for msg in mid_memory_by_id["messages"]:
|
||||
msg_str += f"{msg['detailed_plain_text']}"
|
||||
# time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
|
||||
# mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
|
||||
mid_memory_str += f"{msg_str}\n"
|
||||
except Exception as e:
|
||||
logger.error(f"获取mid_memory_id失败: {e}")
|
||||
traceback.print_exc()
|
||||
return self.talking_message_str
|
||||
|
||||
return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
|
||||
|
||||
else:
|
||||
return self.talking_message_str
|
||||
|
||||
def serch_message_by_text(self, text: str) -> Optional[MessageRecv]:
|
||||
"""
|
||||
根据回复的纯文本
|
||||
1. 在talking_message中查找最新的,最匹配的消息
|
||||
2. 如果找到,则返回消息
|
||||
"""
|
||||
msg_list = []
|
||||
find_msg = None
|
||||
reverse_talking_message = list(reversed(self.talking_message))
|
||||
|
||||
for message in reverse_talking_message:
|
||||
if message["processed_plain_text"] == text:
|
||||
find_msg = message
|
||||
logger.debug(f"找到的锚定消息:find_msg: {find_msg}")
|
||||
break
|
||||
else:
|
||||
similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
|
||||
msg_list.append({"message": message, "similarity": similarity})
|
||||
logger.debug(f"对锚定消息检查:message: {message['processed_plain_text']},similarity: {similarity}")
|
||||
if not find_msg:
|
||||
if msg_list:
|
||||
msg_list.sort(key=lambda x: x["similarity"], reverse=True)
|
||||
if msg_list[0]["similarity"] >= 0.5: # 只返回相似度大于等于0.5的消息
|
||||
find_msg = msg_list[0]["message"]
|
||||
else:
|
||||
logger.debug("没有找到锚定消息,相似度低")
|
||||
return None
|
||||
else:
|
||||
logger.debug("没有找到锚定消息,没有消息捕获")
|
||||
return None
|
||||
|
||||
# logger.debug(f"找到的锚定消息:find_msg: {find_msg}")
|
||||
group_info = find_msg.get("chat_info", {}).get("group_info")
|
||||
user_info = find_msg.get("chat_info", {}).get("user_info")
|
||||
|
||||
content_format = ""
|
||||
accept_format = ""
|
||||
template_items = {}
|
||||
template_name = {}
|
||||
template_default = True
|
||||
|
||||
format_info = {"content_format": content_format, "accept_format": accept_format}
|
||||
template_info = {
|
||||
"template_items": template_items,
|
||||
}
|
||||
|
||||
message_info = {
"platform": find_msg.get("platform"),
"message_id": find_msg.get("message_id"),
"time": find_msg.get("time"),
"group_info": group_info,
"user_info": user_info,
"additional_config": find_msg.get("additional_config"),
"format_info": format_info,
"template_info": template_info,
}
message_dict = {
|
||||
"message_info": message_info,
|
||||
"raw_message": find_msg.get("processed_plain_text"),
|
||||
"detailed_plain_text": find_msg.get("processed_plain_text"),
|
||||
"processed_plain_text": find_msg.get("processed_plain_text"),
|
||||
}
|
||||
find_rec_msg = MessageRecv(message_dict)
|
||||
logger.debug(f"锚定消息处理后:find_rec_msg: {find_rec_msg}")
|
||||
return find_rec_msg
|
||||
|
||||
async def observe(self):
|
||||
# 自上一次观察的新消息
|
||||
new_messages_list = get_raw_msg_by_timestamp_with_chat(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_observe_time,
|
||||
timestamp_end=datetime.now().timestamp(),
|
||||
limit=self.max_now_obs_len,
|
||||
limit_mode="latest",
|
||||
)
|
||||
|
||||
last_obs_time_mark = self.last_observe_time
|
||||
if new_messages_list:
|
||||
self.last_observe_time = new_messages_list[-1]["time"]
|
||||
self.talking_message.extend(new_messages_list)
|
||||
|
||||
if len(self.talking_message) > self.max_now_obs_len:
|
||||
# 计算需要移除的消息数量,保留最新的 max_now_obs_len 条
|
||||
messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len
|
||||
oldest_messages = self.talking_message[:messages_to_remove_count]
|
||||
self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的
|
||||
|
||||
oldest_messages_str = await build_readable_messages(
|
||||
messages=oldest_messages, timestamp_mode="normal", read_mark=0
|
||||
)
|
||||
|
||||
# --- Build prompt using template ---
|
||||
prompt = None # Initialize prompt as None
|
||||
try:
|
||||
# 构建 Prompt - 根据 is_group_chat 选择模板
|
||||
if self.is_group_chat:
|
||||
prompt_template_name = "chat_summary_group_prompt"
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
prompt_template_name, chat_logs=oldest_messages_str
|
||||
)
|
||||
else:
|
||||
# For private chat, add chat_target to the prompt variables
|
||||
prompt_template_name = "chat_summary_private_prompt"
|
||||
# Determine the target name for the prompt
|
||||
chat_target_name = "对方" # Default fallback
|
||||
if self.chat_target_info:
|
||||
# Prioritize person_name, then nickname
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name")
|
||||
or self.chat_target_info.get("user_nickname")
|
||||
or chat_target_name
|
||||
)
|
||||
|
||||
# Format the private chat prompt
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
prompt_template_name,
|
||||
# Assuming the private prompt template uses {chat_target}
|
||||
chat_target=chat_target_name,
|
||||
chat_logs=oldest_messages_str,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}")
|
||||
# prompt remains None
|
||||
|
||||
summary = "没有主题的闲聊" # 默认值
|
||||
|
||||
if prompt: # Check if prompt was built successfully
|
||||
try:
|
||||
summary_result, _, _ = await self.llm_summary.generate_response(prompt)
|
||||
if summary_result: # 确保结果不为空
|
||||
summary = summary_result
|
||||
except Exception as e:
|
||||
logger.error(f"总结主题失败 for chat {self.chat_id}: {e}")
|
||||
# 保留默认总结 "没有主题的闲聊"
|
||||
else:
|
||||
logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}")
|
||||
|
||||
mid_memory = {
|
||||
"id": str(int(datetime.now().timestamp())),
|
||||
"theme": summary,
|
||||
"messages": oldest_messages, # 存储原始消息对象
|
||||
"readable_messages": oldest_messages_str,
|
||||
# "timestamps": oldest_timestamps,
|
||||
"chat_id": self.chat_id,
|
||||
"created_at": datetime.now().timestamp(),
|
||||
}
|
||||
|
||||
self.mid_memorys.append(mid_memory)
|
||||
if len(self.mid_memorys) > self.max_mid_memory_len:
|
||||
self.mid_memorys.pop(0) # 移除最旧的
|
||||
|
||||
mid_memory_str = "之前聊天的内容概述是:\n"
|
||||
for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分
|
||||
time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
|
||||
mid_memory_str += (
|
||||
f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"
|
||||
)
|
||||
self.mid_memory_info = mid_memory_str
|
||||
|
||||
self.talking_message_str = await build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="lite",
|
||||
read_mark=last_obs_time_mark,
|
||||
)
|
||||
self.talking_message_str_truncate = await build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="normal",
|
||||
read_mark=last_obs_time_mark,
|
||||
truncate=True,
|
||||
)
|
||||
|
||||
self.person_list = await get_person_id_list(self.talking_message)
|
||||
|
||||
# print(f"self.11111person_list: {self.person_list}")
|
||||
|
||||
logger.trace(
|
||||
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
|
||||
)
|
||||
|
||||
async def has_new_messages_since(self, timestamp: float) -> bool:
|
||||
"""检查指定时间戳之后是否有新消息"""
|
||||
count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp)
|
||||
return count > 0
|
||||
src/heart_flow/hfcloop_observation.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger_manager import get_logger
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleDetail
from typing import List

# Import the new utility function

logger = get_logger("observation")


# 所有观察的基类
class HFCloopObservation:
    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
        self.history_loop: List[CycleDetail] = []

    def get_observe_info(self):
        return self.observe_info

    def add_loop_info(self, loop_info: CycleDetail):
        logger.debug(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
        print(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
        print(f"action_taken: {loop_info.action_taken}")
        print(f"action_type: {loop_info.action_type}")
        print(f"response_info: {loop_info.response_info}")
        self.history_loop.append(loop_info)

    async def observe(self):
        recent_active_cycles: List[CycleDetail] = []
        for cycle in reversed(self.history_loop):
            # 只关心实际执行了动作的循环
            if cycle.action_taken:
                recent_active_cycles.append(cycle)
                # 最多找最近的3个活动循环
                if len(recent_active_cycles) == 3:
                    break

        cycle_info_block = ""
        consecutive_text_replies = 0
        responses_for_prompt = []

        # 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看)
        for cycle in recent_active_cycles:
            if cycle.action_type == "reply":
                consecutive_text_replies += 1
                # 获取回复内容,如果不存在则返回'[空回复]'
                response_text = cycle.response_info.get("response_text", [])
                # 使用简单的 join 来格式化回复内容列表
                formatted_response = "[空回复]" if not response_text else " ".join(response_text)
                responses_for_prompt.append(formatted_response)
            else:
                # 一旦遇到非文本回复,连续性中断
                break

        # 根据连续文本回复的数量构建提示信息
        # 注意: responses_for_prompt 列表是从最近到最远排序的
        if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复
            cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
        elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复
            cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'
        elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复
            cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")'

        # 包装提示块,增加可读性,即使没有连续回复也给个标记
        if cycle_info_block:
            cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n"
        else:
            # 如果最近的活动循环不是文本回复,或者没有活动循环
            cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"

        self.observe_info = cycle_info_block
src/heart_flow/info/chat_info.py (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
from typing import Dict, Optional
|
||||
from dataclasses import dataclass
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatInfo(InfoBase):
|
||||
"""聊天信息类
|
||||
|
||||
用于记录和管理聊天相关的信息,包括聊天ID、名称和类型等。
|
||||
继承自 InfoBase 类,使用字典存储具体数据。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,固定为 "chat"
|
||||
|
||||
Data Fields:
|
||||
chat_id (str): 聊天的唯一标识符
|
||||
chat_name (str): 聊天的名称
|
||||
chat_type (str): 聊天的类型
|
||||
"""
|
||||
|
||||
type: str = "chat"
|
||||
|
||||
def set_chat_id(self, chat_id: str) -> None:
|
||||
"""设置聊天ID
|
||||
|
||||
Args:
|
||||
chat_id (str): 聊天的唯一标识符
|
||||
"""
|
||||
self.data["chat_id"] = chat_id
|
||||
|
||||
def set_chat_name(self, chat_name: str) -> None:
|
||||
"""设置聊天名称
|
||||
|
||||
Args:
|
||||
chat_name (str): 聊天的名称
|
||||
"""
|
||||
self.data["chat_name"] = chat_name
|
||||
|
||||
def set_chat_type(self, chat_type: str) -> None:
|
||||
"""设置聊天类型
|
||||
|
||||
Args:
|
||||
chat_type (str): 聊天的类型
|
||||
"""
|
||||
self.data["chat_type"] = chat_type
|
||||
|
||||
def get_chat_id(self) -> Optional[str]:
|
||||
"""获取聊天ID
|
||||
|
||||
Returns:
|
||||
Optional[str]: 聊天的唯一标识符,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("chat_id")
|
||||
|
||||
def get_chat_name(self) -> Optional[str]:
|
||||
"""获取聊天名称
|
||||
|
||||
Returns:
|
||||
Optional[str]: 聊天的名称,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("chat_name")
|
||||
|
||||
def get_chat_type(self) -> Optional[str]:
|
||||
"""获取聊天类型
|
||||
|
||||
Returns:
|
||||
Optional[str]: 聊天的类型,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("chat_type")
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, str]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, str]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[str]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[str]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
src/heart_flow/info/cycle_info.py (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
from typing import Dict, Optional, Any
|
||||
from dataclasses import dataclass
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class CycleInfo(InfoBase):
|
||||
"""循环信息类
|
||||
|
||||
用于记录和管理心跳循环的相关信息,包括循环ID、时间信息、动作信息等。
|
||||
继承自 InfoBase 类,使用字典存储具体数据。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,固定为 "cycle"
|
||||
|
||||
Data Fields:
|
||||
cycle_id (str): 当前循环的唯一标识符
|
||||
start_time (str): 循环开始的时间
|
||||
end_time (str): 循环结束的时间
|
||||
action (str): 在循环中采取的动作
|
||||
action_data (Dict[str, Any]): 动作相关的详细数据
|
||||
reason (str): 触发循环的原因
|
||||
observe_info (str): 当前的回复信息
|
||||
"""
|
||||
|
||||
type: str = "cycle"
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, str]:
|
||||
"""获取信息数据"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[str]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
|
||||
def set_cycle_id(self, cycle_id: str) -> None:
|
||||
"""设置循环ID
|
||||
|
||||
Args:
|
||||
cycle_id (str): 循环的唯一标识符
|
||||
"""
|
||||
self.data["cycle_id"] = cycle_id
|
||||
|
||||
def set_start_time(self, start_time: str) -> None:
|
||||
"""设置开始时间
|
||||
|
||||
Args:
|
||||
start_time (str): 循环开始的时间,建议使用标准时间格式
|
||||
"""
|
||||
self.data["start_time"] = start_time
|
||||
|
||||
def set_end_time(self, end_time: str) -> None:
|
||||
"""设置结束时间
|
||||
|
||||
Args:
|
||||
end_time (str): 循环结束的时间,建议使用标准时间格式
|
||||
"""
|
||||
self.data["end_time"] = end_time
|
||||
|
||||
def set_action(self, action: str) -> None:
|
||||
"""设置采取的动作
|
||||
|
||||
Args:
|
||||
action (str): 在循环中执行的动作名称
|
||||
"""
|
||||
self.data["action"] = action
|
||||
|
||||
def set_action_data(self, action_data: Dict[str, Any]) -> None:
|
||||
"""设置动作数据
|
||||
|
||||
Args:
|
||||
action_data (Dict[str, Any]): 动作相关的详细数据,将被转换为字符串存储
|
||||
"""
|
||||
self.data["action_data"] = str(action_data)
|
||||
|
||||
def set_reason(self, reason: str) -> None:
|
||||
"""设置原因
|
||||
|
||||
Args:
|
||||
reason (str): 触发循环的原因说明
|
||||
"""
|
||||
self.data["reason"] = reason
|
||||
|
||||
def set_observe_info(self, observe_info: str) -> None:
|
||||
"""设置回复信息
|
||||
|
||||
Args:
|
||||
observe_info (str): 当前的回复信息
|
||||
"""
|
||||
self.data["observe_info"] = observe_info
|
||||
|
||||
def get_cycle_id(self) -> Optional[str]:
|
||||
"""获取循环ID
|
||||
|
||||
Returns:
|
||||
Optional[str]: 循环的唯一标识符,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("cycle_id")
|
||||
|
||||
def get_start_time(self) -> Optional[str]:
|
||||
"""获取开始时间
|
||||
|
||||
Returns:
|
||||
Optional[str]: 循环开始的时间,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("start_time")
|
||||
|
||||
def get_end_time(self) -> Optional[str]:
|
||||
"""获取结束时间
|
||||
|
||||
Returns:
|
||||
Optional[str]: 循环结束的时间,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("end_time")
|
||||
|
||||
def get_action(self) -> Optional[str]:
|
||||
"""获取采取的动作
|
||||
|
||||
Returns:
|
||||
Optional[str]: 在循环中执行的动作名称,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("action")
|
||||
|
||||
def get_action_data(self) -> Optional[str]:
|
||||
"""获取动作数据
|
||||
|
||||
Returns:
|
||||
Optional[str]: 动作相关的详细数据(字符串形式),如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("action_data")
|
||||
|
||||
def get_reason(self) -> Optional[str]:
|
||||
"""获取原因
|
||||
|
||||
Returns:
|
||||
Optional[str]: 触发循环的原因说明,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("reason")
|
||||
|
||||
def get_observe_info(self) -> Optional[str]:
|
||||
"""获取回复信息
|
||||
|
||||
Returns:
|
||||
Optional[str]: 当前的回复信息,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("observe_info")
|
||||
src/heart_flow/info/info_base.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
from typing import Dict, Optional, Any, List
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
|
||||
class InfoBase:
|
||||
"""信息基类
|
||||
|
||||
这是一个基础信息类,用于存储和管理各种类型的信息数据。
|
||||
所有具体的信息类都应该继承自这个基类。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "base"
|
||||
data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典,
|
||||
支持存储字符串、字典、列表等嵌套数据结构
|
||||
"""
|
||||
|
||||
type: str = "base"
|
||||
data: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, Any]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[Any]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[Any]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
|
||||
def get_info_list(self, key: str) -> List[Any]:
|
||||
"""获取特定属性的信息列表
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
List[Any]: 属性值列表,如果键不存在则返回空列表
|
||||
"""
|
||||
value = self.data.get(key)
|
||||
if isinstance(value, list):
|
||||
return value
|
||||
return []
|
||||
src/heart_flow/info/mind_info.py (new file, 34 lines)
@@ -0,0 +1,34 @@
from typing import Dict, Any
from dataclasses import dataclass, field
from .info_base import InfoBase


@dataclass
class MindInfo(InfoBase):
    """思维信息类

    用于存储和管理当前思维状态的信息。

    Attributes:
        type (str): 信息类型标识符,默认为 "mind"
        data (Dict[str, Any]): 包含 current_mind 的数据字典
    """

    type: str = "mind"
    data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""})

    def get_current_mind(self) -> str:
        """获取当前思维状态

        Returns:
            str: 当前思维状态
        """
        return self.get_info("current_mind") or ""

    def set_current_mind(self, mind: str) -> None:
        """设置当前思维状态

        Args:
            mind: 要设置的思维状态
        """
        self.data["current_mind"] = mind
src/heart_flow/info/obs_info.py (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
from typing import Dict, Optional
|
||||
from dataclasses import dataclass
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class ObsInfo(InfoBase):
|
||||
"""OBS信息类
|
||||
|
||||
用于记录和管理OBS相关的信息,包括说话消息、截断后的说话消息和聊天类型。
|
||||
继承自 InfoBase 类,使用字典存储具体数据。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,固定为 "obs"
|
||||
|
||||
Data Fields:
|
||||
talking_message (str): 说话消息内容
|
||||
talking_message_str_truncate (str): 截断后的说话消息内容
|
||||
chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
|
||||
"""
|
||||
|
||||
type: str = "obs"
|
||||
|
||||
def set_talking_message(self, message: str) -> None:
|
||||
"""设置说话消息
|
||||
|
||||
Args:
|
||||
message (str): 说话消息内容
|
||||
"""
|
||||
self.data["talking_message"] = message
|
||||
|
||||
def set_talking_message_str_truncate(self, message: str) -> None:
|
||||
"""设置截断后的说话消息
|
||||
|
||||
Args:
|
||||
message (str): 截断后的说话消息内容
|
||||
"""
|
||||
self.data["talking_message_str_truncate"] = message
|
||||
|
||||
def set_chat_type(self, chat_type: str) -> None:
|
||||
"""设置聊天类型
|
||||
|
||||
Args:
|
||||
chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
|
||||
"""
|
||||
if chat_type not in ["private", "group", "other"]:
|
||||
chat_type = "other"
|
||||
self.data["chat_type"] = chat_type
|
||||
|
||||
def set_chat_target(self, chat_target: str) -> None:
|
||||
"""设置聊天目标
|
||||
|
||||
Args:
|
||||
chat_target (str): 聊天目标,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
|
||||
"""
|
||||
self.data["chat_target"] = chat_target
|
||||
|
||||
def get_talking_message(self) -> Optional[str]:
|
||||
"""获取说话消息
|
||||
|
||||
Returns:
|
||||
Optional[str]: 说话消息内容,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("talking_message")
|
||||
|
||||
def get_talking_message_str_truncate(self) -> Optional[str]:
|
||||
"""获取截断后的说话消息
|
||||
|
||||
Returns:
|
||||
Optional[str]: 截断后的说话消息内容,如果未设置则返回 None
|
||||
"""
|
||||
return self.get_info("talking_message_str_truncate")
|
||||
|
||||
def get_chat_type(self) -> str:
|
||||
"""获取聊天类型
|
||||
|
||||
Returns:
|
||||
str: 聊天类型,默认为 "other"
|
||||
"""
|
||||
return self.get_info("chat_type") or "other"
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, str]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, str]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[str]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[str]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
src/heart_flow/info/structured_info.py (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
from typing import Dict, Optional, Any, List
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
|
||||
class StructuredInfo:
|
||||
"""信息基类
|
||||
|
||||
这是一个基础信息类,用于存储和管理各种类型的信息数据。
|
||||
所有具体的信息类都应该继承自这个基类。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "base"
|
||||
data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典,
|
||||
支持存储字符串、字典、列表等嵌套数据结构
|
||||
"""
|
||||
|
||||
type: str = "structured_info"
|
||||
data: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, Any]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[Any]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[Any]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
|
||||
def get_info_list(self, key: str) -> List[Any]:
|
||||
"""获取特定属性的信息列表
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
List[Any]: 属性值列表,如果键不存在则返回空列表
|
||||
"""
|
||||
value = self.data.get(key)
|
||||
if isinstance(value, list):
|
||||
return value
|
||||
return []
|
||||
|
||||
def set_info(self, key: str, value: Any) -> None:
|
||||
"""设置特定属性的信息值
|
||||
|
||||
Args:
|
||||
key: 要设置的属性键名
|
||||
value: 要设置的属性值
|
||||
"""
|
||||
self.data[key] = value
|
||||
src/heart_flow/memory_observation.py (new file, 57 lines)
@@ -0,0 +1,57 @@
from src.heart_flow.chatting_observation import Observation
from datetime import datetime
from src.common.logger_manager import get_logger
import traceback

# Import the new utility function
from src.plugins.memory_system.Hippocampus import HippocampusManager
import jieba
from typing import List

logger = get_logger("memory")


class MemoryObservation(Observation):
    def __init__(self, observe_id):
        super().__init__(observe_id)
        self.observe_info: str = ""
        self.context: str = ""
        self.running_memory: List[dict] = []

    def get_observe_info(self):
        for memory in self.running_memory:
            self.observe_info += f"{memory['topic']}:{memory['content']}\n"
        return self.observe_info

    async def observe(self):
        # ---------- 2. 获取记忆 ----------
        try:
            # 从聊天内容中提取关键词
            chat_words = set(jieba.cut(self.context))
            # 过滤掉停用词和单字词
            keywords = [word for word in chat_words if len(word) > 1]
            # 去重并限制数量
            keywords = list(set(keywords))[:5]

            logger.debug(f"取的关键词: {keywords}")

            # 调用记忆系统获取相关记忆
            related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
                valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
            )

            logger.debug(f"获取到的记忆: {related_memory}")

            if related_memory:
                for topic, memory in related_memory:
                    new_item = {"type": "memory", "id": topic, "content": memory, "ttl": 3}
                    self.structured_info.append(new_item)
                    # 将记忆添加到 running_memory
                    self.running_memory.append(
                        {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat()}
                    )
                    logger.debug(f"添加新记忆: {topic} - {memory}")

        except Exception as e:
            logger.error(f"观察 记忆时出错: {e}")
            logger.error(traceback.format_exc())
@@ -1,24 +1,10 @@
|
||||
# 定义了来自外部世界的信息
|
||||
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
|
||||
from datetime import datetime
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.common.logger_manager import get_logger
|
||||
import traceback
|
||||
from src.plugins.utils.chat_message_builder import (
|
||||
get_raw_msg_before_timestamp_with_chat,
|
||||
build_readable_messages,
|
||||
get_raw_msg_by_timestamp_with_chat,
|
||||
num_new_messages_since,
|
||||
get_person_id_list,
|
||||
)
|
||||
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from typing import Optional
|
||||
import difflib
|
||||
from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入
|
||||
from src.plugins.utils.prompt_builder import Prompt
|
||||
|
||||
# Import the new utility function
|
||||
from .utils_chat import get_chat_type_and_target_info
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
@@ -41,259 +27,10 @@ Prompt(
|
||||
|
||||
# 所有观察的基类
|
||||
class Observation:
|
||||
def __init__(self, observe_type, observe_id):
|
||||
def __init__(self, observe_id):
|
||||
self.observe_info = ""
|
||||
self.observe_type = observe_type
|
||||
self.observe_id = observe_id
|
||||
self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
|
||||
|
||||
async def observe(self):
|
||||
pass
|
||||
|
||||
|
||||
# 聊天观察
|
||||
class ChattingObservation(Observation):
|
||||
def __init__(self, chat_id):
|
||||
super().__init__("chat", chat_id)
|
||||
self.chat_id = chat_id
|
||||
|
||||
# --- Initialize attributes (defaults) ---
|
||||
self.is_group_chat: bool = False
|
||||
self.chat_target_info: Optional[dict] = None
|
||||
# --- End Initialization ---
|
||||
|
||||
# --- Other attributes initialized in __init__ ---
|
||||
self.talking_message = []
|
||||
self.talking_message_str = ""
|
||||
self.talking_message_str_truncate = ""
|
||||
self.name = global_config.BOT_NICKNAME
|
||||
self.nick_name = global_config.BOT_ALIAS_NAMES
|
||||
self.max_now_obs_len = global_config.observation_context_size
|
||||
self.overlap_len = global_config.compressed_length
|
||||
self.mid_memorys = []
|
||||
self.max_mid_memory_len = global_config.compress_length_limit
|
||||
self.mid_memory_info = ""
|
||||
self.person_list = []
|
||||
self.llm_summary = LLMRequest(
|
||||
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
|
||||
)
|
||||
|
||||
async def initialize(self):
|
||||
# --- Use utility function to determine chat type and fetch info ---
|
||||
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
|
||||
# logger.debug(f"is_group_chat: {self.is_group_chat}")
|
||||
# logger.debug(f"chat_target_info: {self.chat_target_info}")
|
||||
# --- End using utility function ---
|
||||
|
||||
# Fetch initial messages (existing logic)
|
||||
initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10)
|
||||
self.talking_message = initial_messages
|
||||
self.talking_message_str = await build_readable_messages(self.talking_message)
|
||||
|
||||
# 进行一次观察 返回观察结果observe_info
|
||||
def get_observe_info(self, ids=None):
|
||||
if ids:
|
||||
mid_memory_str = ""
|
||||
for id in ids:
|
||||
print(f"id:{id}")
|
||||
try:
|
||||
for mid_memory in self.mid_memorys:
|
||||
if mid_memory["id"] == id:
|
||||
mid_memory_by_id = mid_memory
|
||||
msg_str = ""
|
||||
for msg in mid_memory_by_id["messages"]:
|
||||
msg_str += f"{msg['detailed_plain_text']}"
|
||||
# time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
|
||||
# mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
|
||||
mid_memory_str += f"{msg_str}\n"
|
||||
except Exception as e:
|
||||
logger.error(f"获取mid_memory_id失败: {e}")
|
||||
traceback.print_exc()
|
||||
return self.talking_message_str
|
||||
|
||||
return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
|
||||
|
||||
else:
|
||||
return self.talking_message_str
|
||||
|
||||
async def observe(self):
|
||||
# 自上一次观察的新消息
|
||||
new_messages_list = get_raw_msg_by_timestamp_with_chat(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_observe_time,
|
||||
timestamp_end=datetime.now().timestamp(),
|
||||
limit=self.max_now_obs_len,
|
||||
limit_mode="latest",
|
||||
)
|
||||
|
||||
last_obs_time_mark = self.last_observe_time
|
||||
if new_messages_list:
|
||||
self.last_observe_time = new_messages_list[-1]["time"]
|
||||
self.talking_message.extend(new_messages_list)
|
||||
|
||||
if len(self.talking_message) > self.max_now_obs_len:
|
||||
# 计算需要移除的消息数量,保留最新的 max_now_obs_len 条
|
||||
messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len
|
||||
oldest_messages = self.talking_message[:messages_to_remove_count]
|
||||
self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的
|
||||
|
||||
oldest_messages_str = await build_readable_messages(
|
||||
messages=oldest_messages, timestamp_mode="normal", read_mark=0
|
||||
)
|
||||
|
||||
# --- Build prompt using template ---
|
||||
prompt = None # Initialize prompt as None
|
||||
try:
|
||||
# 构建 Prompt - 根据 is_group_chat 选择模板
|
||||
if self.is_group_chat:
|
||||
prompt_template_name = "chat_summary_group_prompt"
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
prompt_template_name, chat_logs=oldest_messages_str
|
||||
)
|
||||
else:
|
||||
# For private chat, add chat_target to the prompt variables
|
||||
prompt_template_name = "chat_summary_private_prompt"
|
||||
# Determine the target name for the prompt
|
||||
chat_target_name = "对方" # Default fallback
|
||||
if self.chat_target_info:
|
||||
# Prioritize person_name, then nickname
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name")
|
||||
or self.chat_target_info.get("user_nickname")
|
||||
or chat_target_name
|
||||
)
|
||||
|
||||
# Format the private chat prompt
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
prompt_template_name,
|
||||
# Assuming the private prompt template uses {chat_target}
|
||||
chat_target=chat_target_name,
|
||||
chat_logs=oldest_messages_str,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}")
|
||||
# prompt remains None
|
||||
|
||||
summary = "没有主题的闲聊" # 默认值
|
||||
|
||||
if prompt: # Check if prompt was built successfully
|
||||
try:
|
||||
summary_result, _, _ = await self.llm_summary.generate_response(prompt)
|
||||
if summary_result: # 确保结果不为空
|
||||
summary = summary_result
|
||||
except Exception as e:
|
||||
logger.error(f"总结主题失败 for chat {self.chat_id}: {e}")
|
||||
# 保留默认总结 "没有主题的闲聊"
|
||||
else:
|
||||
logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}")
|
||||
|
||||
mid_memory = {
|
||||
"id": str(int(datetime.now().timestamp())),
|
||||
"theme": summary,
|
||||
"messages": oldest_messages, # 存储原始消息对象
|
||||
"readable_messages": oldest_messages_str,
|
||||
# "timestamps": oldest_timestamps,
|
||||
"chat_id": self.chat_id,
|
||||
"created_at": datetime.now().timestamp(),
|
||||
}
|
||||
|
||||
self.mid_memorys.append(mid_memory)
|
||||
if len(self.mid_memorys) > self.max_mid_memory_len:
|
||||
self.mid_memorys.pop(0) # 移除最旧的
|
||||
|
||||
mid_memory_str = "之前聊天的内容概述是:\n"
|
||||
for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分
|
||||
time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
|
||||
mid_memory_str += (
|
||||
f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"
|
||||
)
|
||||
self.mid_memory_info = mid_memory_str
|
||||
|
||||
self.talking_message_str = await build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="lite",
|
||||
read_mark=last_obs_time_mark,
|
||||
)
|
||||
self.talking_message_str_truncate = await build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="normal",
|
||||
read_mark=last_obs_time_mark,
|
||||
truncate=True,
|
||||
)
|
||||
|
||||
self.person_list = await get_person_id_list(self.talking_message)
|
||||
|
||||
# print(f"self.11111person_list: {self.person_list}")
|
||||
|
||||
logger.trace(
|
||||
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
|
||||
)
|
||||
|
||||
async def find_best_matching_message(self, search_str: str, min_similarity: float = 0.6) -> Optional[MessageRecv]:
|
||||
"""
|
||||
在 talking_message 中查找与 search_str 最匹配的消息。
|
||||
|
||||
Args:
|
||||
search_str: 要搜索的字符串。
|
||||
min_similarity: 要求的最低相似度(0到1之间)。
|
||||
|
||||
Returns:
|
||||
匹配的 MessageRecv 实例,如果找不到则返回 None。
|
||||
"""
|
||||
best_match_score = -1.0
|
||||
best_match_dict = None
|
||||
|
||||
if not self.talking_message:
|
||||
logger.debug(f"Chat {self.chat_id}: talking_message is empty, cannot find match for '{search_str}'")
|
||||
return None
|
||||
|
||||
for message_dict in self.talking_message:
|
||||
try:
|
||||
# 临时创建 MessageRecv 以处理文本
|
||||
temp_msg = MessageRecv(message_dict)
|
||||
await temp_msg.process() # 处理消息以获取 processed_plain_text
|
||||
current_text = temp_msg.processed_plain_text
|
||||
|
||||
if not current_text: # 跳过没有文本内容的消息
|
||||
continue
|
||||
|
||||
# 计算相似度
|
||||
matcher = difflib.SequenceMatcher(None, search_str, current_text)
|
||||
score = matcher.ratio()
|
||||
|
||||
# logger.debug(f"Comparing '{search_str}' with '{current_text}', score: {score}") # 可选:用于调试
|
||||
|
||||
if score > best_match_score:
|
||||
best_match_score = score
|
||||
best_match_dict = message_dict
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing message for matching in chat {self.chat_id}: {e}", exc_info=True)
|
||||
continue # 继续处理下一条消息
|
||||
|
||||
if best_match_dict is not None and best_match_score >= min_similarity:
|
||||
logger.debug(f"Found best match for '{search_str}' with score {best_match_score:.2f}")
|
||||
try:
|
||||
final_msg = MessageRecv(best_match_dict)
|
||||
await final_msg.process()
|
||||
# 确保 MessageRecv 实例有关联的 chat_stream
|
||||
if hasattr(self, "chat_stream"):
|
||||
final_msg.update_chat_stream(self.chat_stream)
|
||||
else:
|
||||
logger.warning(
|
||||
f"ChattingObservation instance for chat {self.chat_id} does not have a chat_stream attribute set."
|
||||
)
|
||||
return final_msg
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating final MessageRecv for chat {self.chat_id}: {e}", exc_info=True)
|
||||
return None
|
||||
else:
|
||||
logger.debug(
|
||||
f"No suitable match found for '{search_str}' in chat {self.chat_id} (best score: {best_match_score:.2f}, threshold: {min_similarity})"
|
||||
)
|
||||
return None
|
||||
|
||||
async def has_new_messages_since(self, timestamp: float) -> bool:
|
||||
"""检查指定时间戳之后是否有新消息"""
|
||||
count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp)
|
||||
return count > 0
|
||||
|
||||
@@ -1,4 +1,5 @@
from .observation import Observation, ChattingObservation
from .observation import Observation
from .chatting_observation import ChattingObservation
import asyncio
import time
from typing import Optional, List, Dict, Tuple, Callable, Coroutine
@@ -10,7 +11,6 @@ from src.plugins.heartFC_chat.heartFC_chat import HeartFChatting
from src.plugins.heartFC_chat.normal_chat import NormalChat
from src.heart_flow.mai_state_manager import MaiStateInfo
from src.heart_flow.chat_state_info import ChatState, ChatStateInfo
from src.heart_flow.sub_mind import SubMind
from .utils_chat import get_chat_type_and_target_info
from .interest_chatting import InterestChatting
@@ -68,11 +68,6 @@ class SubHeartflow:
self.observations: List[ChattingObservation] = [] # 观察列表
# self.running_knowledges = [] # 运行中的知识,待完善

# LLM模型配置,负责进行思考
self.sub_mind = SubMind(
subheartflow_id=self.subheartflow_id, chat_state=self.chat_state, observations=self.observations
)

# 日志前缀 - Moved determination to initialize
self.log_prefix = str(subheartflow_id) # Initial default prefix
@@ -186,7 +181,6 @@ class SubHeartflow:
# 创建 HeartFChatting 实例,并传递 从构造函数传入的 回调函数
self.heart_fc_instance = HeartFChatting(
chat_id=self.subheartflow_id,
sub_mind=self.sub_mind,
observations=self.observations, # 传递所有观察者
on_consecutive_no_reply_callback=self.hfc_no_reply_callback, # <-- Use stored callback
)
@@ -288,9 +282,6 @@ class SubHeartflow:

logger.info(f"{self.log_prefix} 子心流后台任务已停止。")

def update_current_mind(self, response):
self.sub_mind.update_current_mind(response)

def add_observation(self, observation: Observation):
for existing_obs in self.observations:
if existing_obs.observe_id == observation.observe_id:
@@ -332,7 +323,6 @@ class SubHeartflow:
interest_state = await self.get_interest_state()
return {
"interest_state": interest_state,
"current_mind": self.sub_mind.current_mind,
"chat_state": self.chat_state.chat_status.value,
"chat_state_changed_time": self.chat_state_changed_time,
}
@@ -14,7 +14,7 @@ from src.plugins.chat.chat_stream import chat_manager
# 导入心流相关类
from src.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.heart_flow.mai_state_manager import MaiStateInfo
from .observation import ChattingObservation
from src.heart_flow.chatting_observation import ChattingObservation

# 导入LLM请求工具
from src.plugins.models.utils_model import LLMRequest
@@ -2,24 +2,17 @@ from .observation import ChattingObservation
from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
import time
import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
import random
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.do_tool.tool_use import ToolUser
from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
from src.heart_flow.chat_state_info import ChatStateInfo
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
import difflib
from src.plugins.utils.json_utils import process_llm_tool_calls
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.memory_system.Hippocampus import HippocampusManager
import jieba
from src.common.logger_manager import get_logger
from src.heart_flow.sub_mind import SubMind

logger = get_logger("tool_use")


def init_prompt():
# ... 原有代码 ...
@@ -51,6 +44,7 @@ def init_prompt():
|
||||
"""
|
||||
Prompt(tool_executor_prompt, "tool_executor_prompt")
|
||||
|
||||
|
||||
class ToolExecutor:
|
||||
def __init__(self, subheartflow_id: str):
|
||||
self.subheartflow_id = subheartflow_id
|
||||
@@ -63,7 +57,9 @@ class ToolExecutor:
|
||||
)
|
||||
self.structured_info = []
|
||||
|
||||
async def execute_tools(self, sub_mind: SubMind, chat_target_name="对方", is_group_chat=False, return_details=False, cycle_info=None):
|
||||
async def execute_tools(
|
||||
self, sub_mind: SubMind, chat_target_name="对方", is_group_chat=False, return_details=False, cycle_info=None
|
||||
):
|
||||
"""
|
||||
并行执行工具,返回结构化信息
|
||||
|
||||
@@ -119,7 +115,7 @@ class ToolExecutor:
|
||||
prompt_personality=prompt_personality,
|
||||
mood_info=mood_info,
|
||||
bot_name=individuality.name,
|
||||
time_now=time_now
|
||||
time_now=time_now,
|
||||
)
|
||||
|
||||
# 如果指定了cycle_info,记录工具执行的prompt
|
||||
@@ -128,9 +124,7 @@ class ToolExecutor:
|
||||
|
||||
# 调用LLM,专注于工具使用
|
||||
logger.info(f"开始执行工具调用{prompt}")
|
||||
response, _, tool_calls = await self.llm_model.generate_response_tool_async(
|
||||
prompt=prompt, tools=tools
|
||||
)
|
||||
response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
|
||||
|
||||
logger.debug(f"获取到工具原始输出:\n{tool_calls}")
|
||||
# 处理工具调用和结果收集,类似于SubMind中的逻辑
|
||||
@@ -165,10 +159,7 @@ class ToolExecutor:
|
||||
|
||||
# 如果指定了cycle_info,记录工具执行结果
|
||||
if cycle_info:
|
||||
cycle_info.set_tooluse_info(
|
||||
tools_used=used_tools,
|
||||
tool_results=new_structured_items
|
||||
)
|
||||
cycle_info.set_tooluse_info(tools_used=used_tools, tool_results=new_structured_items)
|
||||
|
||||
# 根据return_details决定返回值
|
||||
if return_details:
|
||||
|
||||
src/heart_flow/working_observation.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger_manager import get_logger

# Import the new utility function

logger = get_logger("observation")


# 所有观察的基类
class WorkingObservation:
    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
        self.history_loop = []
        self.structured_info = []

    def get_observe_info(self):
        return self.structured_info

    def add_structured_info(self, structured_info: dict):
        self.structured_info.append(structured_info)

    async def observe(self):
        observed_structured_infos = []
        for structured_info in self.structured_info:
            if structured_info.get("ttl") > 0:
                structured_info["ttl"] -= 1
                observed_structured_infos.append(structured_info)
                logger.debug(f"观察到结构化信息仍旧在: {structured_info}")

        self.structured_info = observed_structured_infos
@@ -100,6 +100,7 @@ class MessageRecv(Message):
Args:
message_dict: MessageCQ序列化后的字典
"""
# print(f"message_dict: {message_dict}")
self.message_info = BaseMessageInfo.from_dict(message_dict.get("message_info", {}))

self.message_segment = Seg.from_dict(message_dict.get("message_segment", {}))
@@ -212,7 +212,7 @@ class MessageManager:
_ = message.update_thinking_time() # 更新思考时间
thinking_start_time = message.thinking_start_time
now_time = time.time()
logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}")
# logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}")
thinking_messages_count, thinking_messages_length = count_messages_between(
start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id
)
@@ -236,7 +236,7 @@ class MessageManager:

await message.process() # 预处理消息内容

logger.debug(f"{message}")
# logger.debug(f"{message}")

# 使用全局 message_sender 实例
await send_message(message)
@@ -117,7 +117,7 @@ class ImageManager:
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
# logger.debug(f"缓存表情包描述: {cached_description}")
return f"[表达了:{cached_description}]"
return f"[表情包,含义看起来是:{cached_description}]"

# 调用AI获取描述
if image_format == "gif" or image_format == "GIF":
@@ -131,7 +131,7 @@ class ImageManager:
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
logger.warning(f"虽然生成了描述,但是找到缓存表情包描述: {cached_description}")
return f"[表达了:{cached_description}]"
return f"[表情包,含义看起来是:{cached_description}]"

# 根据配置决定是否保存图片
if global_config.save_emoji:
@@ -1,12 +1,12 @@
|
||||
import os
|
||||
import time
|
||||
import json
|
||||
from typing import List, Dict, Any, Optional, Tuple
|
||||
from typing import List, Dict, Any, Tuple
|
||||
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
|
||||
from src.common.logger_manager import get_logger
|
||||
|
||||
logger = get_logger("cycle_analyzer")
|
||||
|
||||
|
||||
class CycleAnalyzer:
|
||||
"""循环信息分析类,提供查询和分析CycleInfo的工具"""
|
||||
|
||||
@@ -30,8 +30,7 @@ class CycleAnalyzer:
|
||||
if not os.path.exists(self.base_dir):
|
||||
return []
|
||||
|
||||
return [d for d in os.listdir(self.base_dir)
|
||||
if os.path.isdir(os.path.join(self.base_dir, d))]
|
||||
return [d for d in os.listdir(self.base_dir) if os.path.isdir(os.path.join(self.base_dir, d))]
|
||||
except Exception as e:
|
||||
logger.error(f"获取聊天流列表时出错: {e}")
|
||||
return []
|
||||
@@ -70,7 +69,7 @@ class CycleAnalyzer:
|
||||
if limit < 0:
|
||||
return files[start:]
|
||||
else:
|
||||
return files[start:start+limit]
|
||||
return files[start : start + limit]
|
||||
except Exception as e:
|
||||
logger.error(f"获取聊天流循环文件列表时出错: {e}")
|
||||
return []
|
||||
@@ -89,7 +88,7 @@ class CycleAnalyzer:
|
||||
if not os.path.exists(filepath):
|
||||
return f"文件不存在: {filepath}"
|
||||
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
except Exception as e:
|
||||
logger.error(f"读取循环文件内容时出错: {e}")
|
||||
@@ -116,11 +115,11 @@ class CycleAnalyzer:
|
||||
tool_usage = {}
|
||||
|
||||
for filepath in files:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
|
||||
# 解析动作类型
|
||||
for line in content.split('\n'):
|
||||
for line in content.split("\n"):
|
||||
if line.startswith("动作:"):
|
||||
action = line[3:].strip()
|
||||
action_counts[action] = action_counts.get(action, 0) + 1
|
||||
@@ -128,14 +127,14 @@ class CycleAnalyzer:
|
||||
# 解析耗时
|
||||
elif line.startswith("耗时:"):
|
||||
try:
|
||||
duration = float(line[3:].strip().split('秒')[0])
|
||||
duration = float(line[3:].strip().split("秒")[0])
|
||||
total_duration += duration
|
||||
except:
|
||||
pass
|
||||
|
||||
# 解析工具使用
|
||||
elif line.startswith("使用的工具:"):
|
||||
tools = line[6:].strip().split(', ')
|
||||
tools = line[6:].strip().split(", ")
|
||||
for tool in tools:
|
||||
tool_usage[tool] = tool_usage.get(tool, 0) + 1
|
||||
|
||||
@@ -146,7 +145,7 @@ class CycleAnalyzer:
|
||||
"动作统计": action_counts,
|
||||
"平均耗时": f"{avg_duration:.2f}秒",
|
||||
"总耗时": f"{total_duration:.2f}秒",
|
||||
"工具使用次数": tool_usage
|
||||
"工具使用次数": tool_usage,
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"分析聊天流循环时出错: {e}")
|
||||
@@ -172,7 +171,7 @@ class CycleAnalyzer:
|
||||
try:
|
||||
# 从文件名中提取时间戳
|
||||
filename = os.path.basename(filepath)
|
||||
timestamp_str = filename.split('_', 2)[2].split('.')[0]
|
||||
timestamp_str = filename.split("_", 2)[2].split(".")[0]
|
||||
timestamp = time.mktime(time.strptime(timestamp_str, "%Y%m%d_%H%M%S"))
|
||||
all_cycles.append((timestamp, stream_id, filepath))
|
||||
except:
|
||||
@@ -205,7 +204,7 @@ if __name__ == "__main__":
|
||||
# 获取最新的循环
|
||||
cycles = analyzer.get_stream_cycles(stream_id, limit=1)
|
||||
if cycles:
|
||||
print(f"\n最新循环内容:")
|
||||
print("\n最新循环内容:")
|
||||
print(analyzer.get_cycle_content(cycles[0]))
|
||||
|
||||
# 获取所有聊天流中最新的3个循环
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from typing import List, Dict, Any
|
||||
from src.plugins.heartFC_chat.cycle_analyzer import CycleAnalyzer
|
||||
|
||||
|
||||
def print_section(title: str, width: int = 80):
|
||||
"""打印分隔线和标题"""
|
||||
print("\n" + "=" * width)
|
||||
print(f" {title} ".center(width, "="))
|
||||
print("=" * width)
|
||||
|
||||
|
||||
def list_streams_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
"""列出所有聊天流"""
|
||||
print_section("所有聊天流")
|
||||
@@ -21,7 +21,8 @@ def list_streams_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
|
||||
for i, stream_id in enumerate(streams):
|
||||
count = analyzer.get_stream_cycle_count(stream_id)
|
||||
print(f"[{i+1}] {stream_id} - {count} 个循环")
|
||||
print(f"[{i + 1}] {stream_id} - {count} 个循环")
|
||||
|
||||
|
||||
def analyze_stream_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
"""分析指定聊天流的循环信息"""
|
||||
@@ -40,16 +41,17 @@ def analyze_stream_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
print(f" 平均耗时: {stats['平均耗时']}")
|
||||
|
||||
print("\n动作统计:")
|
||||
for action, count in stats['动作统计'].items():
|
||||
for action, count in stats["动作统计"].items():
|
||||
if count > 0:
|
||||
percent = (count / stats['总循环数']) * 100
|
||||
percent = (count / stats["总循环数"]) * 100
|
||||
print(f" {action}: {count} ({percent:.1f}%)")
|
||||
|
||||
if stats.get('工具使用次数'):
|
||||
if stats.get("工具使用次数"):
|
||||
print("\n工具使用次数:")
|
||||
for tool, count in stats['工具使用次数'].items():
|
||||
for tool, count in stats["工具使用次数"].items():
|
||||
print(f" {tool}: {count}")
|
||||
|
||||
|
||||
def list_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
"""列出指定聊天流的循环"""
|
||||
stream_id = args.stream_id
|
||||
@@ -70,9 +72,10 @@ def list_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
|
||||
|
||||
for i, filepath in enumerate(cycles):
|
||||
filename = os.path.basename(filepath)
|
||||
cycle_id = filename.split('_')[1]
|
||||
timestamp = filename.split('_', 2)[2].split('.')[0]
|
||||
print(f"[{i+1}] 循环ID: {cycle_id}, 时间: {timestamp}, 文件: {filename}")
|
||||
cycle_id = filename.split("_")[1]
|
||||
timestamp = filename.split("_", 2)[2].split(".")[0]
|
||||
print(f"[{i + 1}] 循环ID: {cycle_id}, 时间: {timestamp}, 文件: {filename}")
|
||||
|
||||
|
||||
def view_cycle_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
"""查看指定循环的详细信息"""
@@ -95,6 +98,7 @@ def view_cycle_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
content = analyzer.get_cycle_content(filepath)
print(content)


def latest_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
"""查看所有聊天流中最新的几个循环"""
count = args.count if args.count > 0 else 10
@@ -108,12 +112,12 @@ def latest_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):

for i, (stream_id, filepath) in enumerate(latest_cycles):
filename = os.path.basename(filepath)
cycle_id = filename.split('_')[1]
timestamp = filename.split('_', 2)[2].split('.')[0]
print(f"[{i+1}] 聊天流: {stream_id}, 循环ID: {cycle_id}, 时间: {timestamp}")
cycle_id = filename.split("_")[1]
timestamp = filename.split("_", 2)[2].split(".")[0]
print(f"[{i + 1}] 聊天流: {stream_id}, 循环ID: {cycle_id}, 时间: {timestamp}")

# 可以选择性添加提取基本信息的功能
with open(filepath, 'r', encoding='utf-8') as f:
with open(filepath, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("动作:"):
action = line.strip()
@@ -121,6 +125,7 @@ def latest_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace):
break
print()


def main():
parser = argparse.ArgumentParser(description="HeartFC循环信息查看工具")
subparsers = parser.add_subparsers(dest="command", help="子命令")
@@ -163,5 +168,6 @@ def main():
else:
parser.print_help()


if __name__ == "__main__":
main()
319 src/plugins/heartFC_chat/expressors/default_expressor.py Normal file
@@ -0,0 +1,319 @@
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Optional, Dict, Any
|
||||
from src.plugins.chat.message import MessageRecv, MessageThinking, MessageSending
|
||||
from src.plugins.chat.message import Seg # Local import needed after move
|
||||
from src.plugins.chat.message import UserInfo
|
||||
from src.plugins.chat.chat_stream import chat_manager
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
|
||||
from src.plugins.utils.timer_calculator import Timer # <--- Import Timer
|
||||
from src.plugins.emoji_system.emoji_manager import emoji_manager
|
||||
from src.plugins.heartFC_chat.heartflow_prompt_builder import prompt_builder
|
||||
from src.plugins.heartFC_chat.heartFC_sender import HeartFCSender
|
||||
from src.plugins.chat.utils import process_llm_response
|
||||
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
|
||||
from src.plugins.moods.moods import MoodManager
|
||||
from src.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
from src.plugins.chat.chat_stream import ChatStream
|
||||
|
||||
logger = get_logger("expressor")
|
||||
|
||||
|
||||
class DefaultExpressor:
|
||||
def __init__(self, chat_id: str):
|
||||
self.log_prefix = "expressor"
|
||||
self.express_model = LLMRequest(
|
||||
model=global_config.llm_normal,
|
||||
temperature=global_config.llm_normal["temp"],
|
||||
max_tokens=256,
|
||||
request_type="response_heartflow",
|
||||
)
|
||||
self.heart_fc_sender = HeartFCSender()
|
||||
|
||||
self.chat_id = chat_id
|
||||
self.chat_stream: Optional[ChatStream] = None
|
||||
self.is_group_chat = True
|
||||
self.chat_target_info = None
|
||||
|
||||
async def initialize(self):
|
||||
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
|
||||
|
||||
async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]:
|
||||
"""创建思考消息 (尝试锚定到 anchor_message)"""
|
||||
if not anchor_message or not anchor_message.chat_stream:
|
||||
logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。")
|
||||
return None
|
||||
|
||||
chat = anchor_message.chat_stream
|
||||
messageinfo = anchor_message.message_info
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.BOT_QQ,
|
||||
user_nickname=global_config.BOT_NICKNAME,
|
||||
platform=messageinfo.platform,
|
||||
)
|
||||
logger.debug(f"创建思考消息:{anchor_message}")
|
||||
logger.debug(f"创建思考消息chat:{chat}")
|
||||
logger.debug(f"创建思考消息bot_user_info:{bot_user_info}")
|
||||
logger.debug(f"创建思考消息messageinfo:{messageinfo}")
|
||||
|
||||
thinking_time_point = round(time.time(), 2)
|
||||
thinking_id = "mt" + str(thinking_time_point)
|
||||
thinking_message = MessageThinking(
|
||||
message_id=thinking_id,
|
||||
chat_stream=chat,
|
||||
bot_user_info=bot_user_info,
|
||||
reply=anchor_message, # 回复的是锚点消息
|
||||
thinking_start_time=thinking_time_point,
|
||||
)
|
||||
logger.debug(f"创建思考消息thinking_message:{thinking_message}")
|
||||
# Access MessageManager directly (using heart_fc_sender)
|
||||
await self.heart_fc_sender.register_thinking(thinking_message)
|
||||
return thinking_id
|
||||
|
||||
async def deal_reply(
|
||||
self,
|
||||
cycle_timers: dict,
|
||||
action_data: Dict[str, Any],
|
||||
reasoning: str,
|
||||
anchor_message: MessageRecv,
|
||||
) -> tuple[bool, str]:
|
||||
# 创建思考消息
|
||||
thinking_id = await self._create_thinking_message(anchor_message)
|
||||
if not thinking_id:
|
||||
raise Exception("无法创建思考消息")
|
||||
|
||||
try:
|
||||
has_sent_something = False
|
||||
|
||||
# 处理文本部分
|
||||
text_part = action_data.get("text", [])
|
||||
if text_part:
|
||||
with Timer("生成回复", cycle_timers):
|
||||
# 可以保留原有的文本处理逻辑或进行适当调整
|
||||
reply = await self.express(
|
||||
in_mind_reply=text_part,
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
reason=reasoning,
|
||||
)
|
||||
|
||||
if reply:
|
||||
with Timer("发送文本消息", cycle_timers):
|
||||
await self._send_response_messages(
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
response_set=reply,
|
||||
)
|
||||
has_sent_something = True
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 文本回复生成失败")
|
||||
|
||||
# 处理表情部分
|
||||
emoji_keyword = action_data.get("emojis", [])
|
||||
if emoji_keyword:
|
||||
await self._handle_emoji(anchor_message, [], emoji_keyword)
|
||||
has_sent_something = True
|
||||
|
||||
if not has_sent_something:
|
||||
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
|
||||
|
||||
return has_sent_something, thinking_id
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"回复失败: {e}")
|
||||
return False, thinking_id
|
||||
|
||||
# --- 回复器 (Replier) 的定义 --- #
|
||||
|
||||
async def express(
|
||||
self,
|
||||
in_mind_reply: str,
|
||||
reason: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
) -> Optional[List[str]]:
|
||||
"""
|
||||
回复器 (Replier): 核心逻辑,负责生成回复文本。
|
||||
(已整合原 HeartFCGenerator 的功能)
|
||||
"""
|
||||
try:
|
||||
# 1. 获取情绪影响因子并调整模型温度
|
||||
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
|
||||
current_temp = global_config.llm_normal["temp"] * arousal_multiplier
|
||||
self.express_model.temperature = current_temp # 动态调整温度
|
||||
|
||||
# 2. 获取信息捕捉器
|
||||
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
|
||||
|
||||
# --- Determine sender_name for private chat ---
|
||||
sender_name_for_prompt = "某人" # Default for group or if info unavailable
|
||||
if not self.is_group_chat and self.chat_target_info:
|
||||
# Prioritize person_name, then nickname
|
||||
sender_name_for_prompt = (
|
||||
self.chat_target_info.get("person_name")
|
||||
or self.chat_target_info.get("user_nickname")
|
||||
or sender_name_for_prompt
|
||||
)
|
||||
# --- End determining sender_name ---
|
||||
|
||||
# 3. 构建 Prompt
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await prompt_builder.build_prompt(
|
||||
build_mode="focus",
|
||||
chat_stream=self.chat_stream, # Pass the stream object
|
||||
in_mind_reply=in_mind_reply,
|
||||
reason=reason,
|
||||
current_mind_info="",
|
||||
structured_info="",
|
||||
sender_name=sender_name_for_prompt, # Pass determined name
|
||||
)
|
||||
|
||||
# 4. 调用 LLM 生成回复
|
||||
content = None
|
||||
reasoning_content = None
|
||||
model_name = "unknown_model"
|
||||
if not prompt:
|
||||
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
|
||||
return None
|
||||
|
||||
try:
|
||||
with Timer("LLM生成", {}): # 内部计时器,可选保留
|
||||
content, reasoning_content, model_name = await self.express_model.generate_response(prompt)
|
||||
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n生成回复: {content}\n")
|
||||
# 捕捉 LLM 输出信息
|
||||
info_catcher.catch_after_llm_generated(
|
||||
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
|
||||
)
|
||||
|
||||
except Exception as llm_e:
|
||||
# 精简报错信息
|
||||
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}")
|
||||
return None # LLM 调用失败则无法生成回复
|
||||
|
||||
# 5. 处理 LLM 响应
|
||||
if not content:
|
||||
logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。")
|
||||
return None
|
||||
|
||||
processed_response = process_llm_response(content)
|
||||
|
||||
if not processed_response:
|
||||
logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。")
|
||||
return None
|
||||
|
||||
return processed_response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}")
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
# --- 发送器 (Sender) --- #
|
||||
|
||||
async def _send_response_messages(
|
||||
self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str
|
||||
) -> Optional[MessageSending]:
|
||||
"""发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
|
||||
if not anchor_message or not anchor_message.chat_stream:
|
||||
logger.error(f"{self.log_prefix} 无法发送回复,缺少有效的锚点消息或聊天流。")
|
||||
return None
|
||||
|
||||
chat = self.chat_stream
|
||||
chat_id = self.chat_id
|
||||
stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志
|
||||
|
||||
# 检查思考过程是否仍在进行,并获取开始时间
|
||||
thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id)
|
||||
|
||||
if thinking_start_time is None:
|
||||
logger.warning(f"[{stream_name}] {thinking_id} 思考过程未找到或已结束,无法发送回复。")
|
||||
return None
|
||||
|
||||
mark_head = False
|
||||
first_bot_msg: Optional[MessageSending] = None
|
||||
reply_message_ids = [] # 记录实际发送的消息ID
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.BOT_QQ,
|
||||
user_nickname=global_config.BOT_NICKNAME,
|
||||
platform=chat.platform,
|
||||
)
|
||||
|
||||
for i, msg_text in enumerate(response_set):
|
||||
# 为每个消息片段生成唯一ID
|
||||
part_message_id = f"{thinking_id}_{i}"
|
||||
message_segment = Seg(type="text", data=msg_text)
|
||||
bot_message = MessageSending(
|
||||
message_id=part_message_id, # 使用片段的唯一ID
|
||||
chat_stream=chat,
|
||||
bot_user_info=bot_user_info,
|
||||
sender_info=anchor_message.message_info.user_info,
|
||||
message_segment=message_segment,
|
||||
reply=anchor_message, # 回复原始锚点
|
||||
is_head=not mark_head,
|
||||
is_emoji=False,
|
||||
thinking_start_time=thinking_start_time, # 传递原始思考开始时间
|
||||
)
|
||||
try:
|
||||
if not mark_head:
|
||||
mark_head = True
|
||||
first_bot_msg = bot_message # 保存第一个成功发送的消息对象
|
||||
await self.heart_fc_sender.type_and_send_message(bot_message, typing=False)
|
||||
else:
|
||||
await self.heart_fc_sender.type_and_send_message(bot_message, typing=True)
|
||||
|
||||
reply_message_ids.append(part_message_id) # 记录我们生成的ID
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"{self.log_prefix}[Sender-{thinking_id}] 发送回复片段 {i} ({part_message_id}) 时失败: {e}"
|
||||
)
|
||||
# 这里可以选择是继续发送下一个片段还是中止
|
||||
|
||||
# 在尝试发送完所有片段后,完成原始的 thinking_id 状态
|
||||
try:
|
||||
await self.heart_fc_sender.complete_thinking(chat_id, thinking_id)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 完成思考状态 {thinking_id} 时出错: {e}")
|
||||
|
||||
return first_bot_msg # 返回第一个成功发送的消息对象
|
||||
|
||||
async def _handle_emoji(self, anchor_message: Optional[MessageRecv], response_set: List[str], send_emoji: str = ""):
|
||||
"""处理表情包 (尝试锚定到 anchor_message),使用 HeartFCSender"""
|
||||
if not anchor_message or not anchor_message.chat_stream:
|
||||
logger.error(f"{self.log_prefix} 无法处理表情包,缺少有效的锚点消息或聊天流。")
|
||||
return
|
||||
|
||||
chat = anchor_message.chat_stream
|
||||
|
||||
emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)
|
||||
|
||||
if emoji_raw:
|
||||
emoji_path, description = emoji_raw
|
||||
|
||||
emoji_cq = image_path_to_base64(emoji_path)
|
||||
thinking_time_point = round(time.time(), 2) # 用于唯一ID
|
||||
message_segment = Seg(type="emoji", data=emoji_cq)
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.BOT_QQ,
|
||||
user_nickname=global_config.BOT_NICKNAME,
|
||||
platform=anchor_message.message_info.platform,
|
||||
)
|
||||
bot_message = MessageSending(
|
||||
message_id="me" + str(thinking_time_point), # 表情消息的唯一ID
|
||||
chat_stream=chat,
|
||||
bot_user_info=bot_user_info,
|
||||
sender_info=anchor_message.message_info.user_info,
|
||||
message_segment=message_segment,
|
||||
reply=anchor_message, # 回复原始锚点
|
||||
is_head=False, # 表情通常不是头部消息
|
||||
is_emoji=True,
|
||||
# 不需要 thinking_start_time
|
||||
)
|
||||
|
||||
try:
|
||||
await self.heart_fc_sender.send_and_store(bot_message)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 发送表情包 {bot_message.message_info.message_id} 时失败: {e}")
|
||||
@@ -4,7 +4,7 @@ import json
from typing import List, Optional, Dict, Any


class CycleInfo:
class CycleDetail:
"""循环信息记录类"""

def __init__(self, cycle_id: int):
@@ -70,9 +70,12 @@ class CycleInfo:
"""完成循环,记录结束时间"""
self.end_time = time.time()

def set_action_info(self, action_type: str, reasoning: str, action_taken: bool):
def set_action_info(
self, action_type: str, reasoning: str, action_taken: bool, action_data: Optional[Dict[str, Any]] = None
):
"""设置动作信息"""
self.action_type = action_type
self.action_data = action_data
self.reasoning = reasoning
self.action_taken = action_taken

@@ -143,7 +146,7 @@ class CycleInfo:
self.planner_info["parsed_result"] = parsed_result

@staticmethod
def save_to_file(cycle_info: 'CycleInfo', stream_id: str, base_dir: str = "log_debug") -> str:
def save_to_file(cycle_info: "CycleDetail", stream_id: str, base_dir: str = "log_debug") -> str:
"""
将CycleInfo保存到文件

@@ -169,7 +172,7 @@ class CycleInfo:
cycle_data = cycle_info.to_dict()

# 格式化输出成易读的格式
with open(filepath, 'w', encoding='utf-8') as f:
with open(filepath, "w", encoding="utf-8") as f:
# 写入基本信息
f.write(f"循环ID: {cycle_info.cycle_id}\n")
f.write(f"开始时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.start_time))}\n")
@@ -194,13 +197,13 @@ class CycleInfo:
# 写入响应信息
f.write("== 响应信息 ==\n")
f.write(f"锚点消息ID: {cycle_info.response_info['anchor_message_id']}\n")
if cycle_info.response_info['response_text']:
if cycle_info.response_info["response_text"]:
f.write("回复文本:\n")
for i, text in enumerate(cycle_info.response_info['response_text']):
f.write(f" [{i+1}] {text}\n")
if cycle_info.response_info['emoji_info']:
for i, text in enumerate(cycle_info.response_info["response_text"]):
f.write(f" [{i + 1}] {text}\n")
if cycle_info.response_info["emoji_info"]:
f.write(f"表情信息: {cycle_info.response_info['emoji_info']}\n")
if cycle_info.response_info['reply_message_ids']:
if cycle_info.response_info["reply_message_ids"]:
f.write(f"回复消息ID: {', '.join(cycle_info.response_info['reply_message_ids'])}\n")
f.write("\n")

@@ -213,15 +216,15 @@ class CycleInfo:

# 写入ToolUse信息
f.write("== 工具使用信息 ==\n")
if cycle_info.tooluse_info['tools_used']:
if cycle_info.tooluse_info["tools_used"]:
f.write(f"使用的工具: {', '.join(cycle_info.tooluse_info['tools_used'])}\n")
else:
f.write("未使用工具\n")

if cycle_info.tooluse_info['tool_results']:
if cycle_info.tooluse_info["tool_results"]:
f.write("工具结果:\n")
for i, result in enumerate(cycle_info.tooluse_info['tool_results']):
f.write(f" [{i+1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n")
for i, result in enumerate(cycle_info.tooluse_info["tool_results"]):
f.write(f" [{i + 1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n")
f.write("\n")
f.write("工具执行 Prompt:\n")
f.write(f"{cycle_info.tooluse_info['prompt']}\n\n")
@@ -257,15 +260,15 @@ class CycleInfo:
return None

# 尝试从文件末尾读取JSON数据
with open(filepath, 'r', encoding='utf-8') as f:
with open(filepath, "r", encoding="utf-8") as f:
lines = f.readlines()

# 查找"解析结果:"后的JSON数据
for i, line in enumerate(lines):
if "解析结果:" in line and i+1 < len(lines):
if "解析结果:" in line and i + 1 < len(lines):
# 尝试解析后面的行
json_data = ""
for j in range(i+1, len(lines)):
for j in range(i + 1, len(lines)):
json_data += lines[j]

try:
@@ -296,8 +299,11 @@ class CycleInfo:
if not os.path.exists(stream_dir):
return []

files = [os.path.join(stream_dir, f) for f in os.listdir(stream_dir)
if f.startswith("cycle_") and f.endswith(".txt")]
files = [
os.path.join(stream_dir, f)
for f in os.listdir(stream_dir)
if f.startswith("cycle_") and f.endswith(".txt")
]
return sorted(files)
except Exception as e:
print(f"列出循环文件时出错: {e}")
File diff suppressed because it is too large
@@ -99,9 +99,13 @@ class HeartFCSender:
_ = message.update_thinking_time()

# --- 条件应用 set_reply 逻辑 ---
if message.apply_set_reply_logic and message.is_head and not message.is_private_message():
if (
message.is_head
and not message.is_private_message()
and message.reply.processed_plain_text != "[System Trigger Context]"
):
logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")
message.set_reply()
message.set_reply(message.reply)
# --- 结束条件 set_reply ---

await message.process()

@@ -15,29 +15,51 @@ from ..memory_system.Hippocampus import HippocampusManager
|
||||
from ..schedule.schedule_generator import bot_schedule
|
||||
from ..knowledge.knowledge_lib import qa_manager
|
||||
import traceback
|
||||
from .heartFC_Cycleinfo import CycleInfo
|
||||
from .heartFC_Cycleinfo import CycleDetail
|
||||
|
||||
|
||||
logger = get_logger("prompt")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
# Prompt(
|
||||
# """
|
||||
# {info_from_tools}
|
||||
# {chat_target}
|
||||
# {chat_talking_prompt}
|
||||
# 现在你想要在群里发言或者回复。\n
|
||||
# 你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。
|
||||
# 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。
|
||||
# 看到以上聊天记录,你刚刚在想:
|
||||
|
||||
# {current_mind_info}
|
||||
# 因为上述想法,你决定发言,原因是:{reason}
|
||||
# 依照这些内容组织回复:{in_mind_reply},不要原句回复,根据下面的要求,对其进行修改
|
||||
# 要求:是尽量简短一些。把握聊天内容,{reply_style2}。不要复读自己说的话。{prompt_ger}
|
||||
# {reply_style1},说中文,不要刻意突出自身学科背景。
|
||||
# {moderation_prompt}。不要浮夸,平淡一些。
|
||||
# 注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
|
||||
# "heart_flow_prompt",
|
||||
# )
|
||||
Prompt(
|
||||
"""
|
||||
{info_from_tools}
|
||||
你可以参考以下的语言习惯:
|
||||
当表示惊叹时,使用 我嘞个xxxx
|
||||
当表示惊讶无语是,使用 不是?
|
||||
当表示无语时,使用 阿这
|
||||
当表示震惊时,使用 卧槽
|
||||
|
||||
{chat_target}
|
||||
{chat_talking_prompt}
|
||||
现在你想要在群里发言或者回复。\n
|
||||
你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。
|
||||
你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。
|
||||
看到以上聊天记录,你刚刚在想:
|
||||
|
||||
{current_mind_info}
|
||||
因为上述想法,你决定发言,原因是:{reason}
|
||||
依照这些内容组织回复:{in_mind_reply},不要原句回复,根据下面的要求,对其进行修改
|
||||
要求:是尽量简短一些。把握聊天内容,{reply_style2}。不要复读自己说的话。{prompt_ger}
|
||||
{reply_style1},说中文,不要刻意突出自身学科背景。
|
||||
{moderation_prompt}。不要浮夸,平淡一些。
|
||||
你想表达:{in_mind_reply}
|
||||
原因是:{reason}
|
||||
请根据你想表达的内容,参考上述语言习惯,和下面的要求,给出回复
|
||||
回复要求:
|
||||
尽量简短一些。{reply_style2}。{prompt_ger}
|
||||
{reply_style1},说中文,不要刻意突出自身学科背景。不要浮夸,平淡一些。
|
||||
注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
|
||||
"heart_flow_prompt",
|
||||
)
|
||||
@@ -71,14 +93,19 @@ def init_prompt():
|
||||
2. 回复(reply)适用:
|
||||
- 有实质性内容需要表达
|
||||
- 有人提到你,但你还没有回应他
|
||||
- 可以追加emoji_query表达情绪(emoji_query填写表情包的适用场合,也就是当前场合)
|
||||
- 不要追加太多表情
|
||||
- 在合适的时候添加表情(不要总是添加)
|
||||
- 如果你要回复特定某人的某句话,或者你想回复较早的消息,请在target中指定那句话的原始文本
|
||||
|
||||
3. 回复要求:
|
||||
3. 回复target选择:
|
||||
-如果选择了target,不用特别提到某个人的人名
|
||||
- 除非有明确的回复目标,否则不要添加target
|
||||
|
||||
4. 回复要求:
|
||||
-不要太浮夸
|
||||
-一次只回复一个人
|
||||
-一次只回复一个话题
|
||||
|
||||
4. 自我对话处理:
|
||||
5. 自我对话处理:
|
||||
- 如果是自己发的消息想继续,需自然衔接
|
||||
- 避免重复或评价自己的发言
|
||||
- 不要和自己聊天
|
||||
@@ -95,8 +122,9 @@ def init_prompt():
|
||||
如果选择reply,请按以下JSON格式返回:
|
||||
{{
|
||||
"action": "reply",
|
||||
"text": ["第一段文本", "第二段文本"], // 可选,如果想发送文本
|
||||
"emojis": ["表情关键词1", "表情关键词2"] // 可选,如果想发送表情
|
||||
"text": "你想表达的内容",
|
||||
"emojis": "表情关键词",
|
||||
"target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)",
|
||||
"reasoning": "你的决策理由",
|
||||
}}
|
||||
|
||||
@@ -196,7 +224,9 @@ def init_prompt():
|
||||
)
|
||||
|
||||
|
||||
async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply) -> str:
|
||||
async def _build_prompt_focus(
|
||||
reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply
|
||||
) -> str:
|
||||
individuality = Individuality.get_instance()
|
||||
prompt_personality = individuality.get_prompt(x_person=0, level=2)
|
||||
|
||||
@@ -265,19 +295,20 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
info_from_tools=structured_info_prompt,
|
||||
# info_from_tools=structured_info_prompt,
|
||||
chat_target=chat_target_1, # Used in group template
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
# chat_talking_prompt=chat_talking_prompt,
|
||||
bot_name=global_config.BOT_NICKNAME,
|
||||
prompt_personality=prompt_personality,
|
||||
# prompt_personality=prompt_personality,
|
||||
prompt_personality="",
|
||||
chat_target_2=chat_target_2, # Used in group template
|
||||
current_mind_info=current_mind_info,
|
||||
# current_mind_info=current_mind_info,
|
||||
reply_style2=reply_style2_chosen,
|
||||
reply_style1=reply_style1_chosen,
|
||||
reason=reason,
|
||||
in_mind_reply=in_mind_reply,
|
||||
prompt_ger=prompt_ger,
|
||||
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
|
||||
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
|
||||
# sender_name is not used in the group template
|
||||
)
|
||||
else: # Private chat
|
||||
@@ -766,11 +797,11 @@ class PromptBuilder:
|
||||
self,
|
||||
is_group_chat: bool, # Now passed as argument
|
||||
chat_target_info: Optional[dict], # Now passed as argument
|
||||
cycle_history: Deque["CycleInfo"], # Now passed as argument (Type hint needs import or string)
|
||||
observed_messages_str: str,
|
||||
current_mind: Optional[str],
|
||||
structured_info: Dict[str, Any],
|
||||
current_available_actions: Dict[str, str],
|
||||
cycle_info: Optional[str],
|
||||
# replan_prompt: str, # Replan logic still simplified
|
||||
) -> str:
|
||||
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
|
||||
@@ -809,35 +840,6 @@ class PromptBuilder:
|
||||
else:
|
||||
current_mind_block = "你的内心想法:\n[没有特别的想法]"
|
||||
|
||||
# Cycle info block (using passed cycle_history)
|
||||
cycle_info_block = ""
|
||||
recent_active_cycles = []
|
||||
for cycle in reversed(cycle_history):
|
||||
if cycle.action_taken:
|
||||
recent_active_cycles.append(cycle)
|
||||
if len(recent_active_cycles) == 3:
|
||||
break
|
||||
consecutive_text_replies = 0
|
||||
responses_for_prompt = []
|
||||
for cycle in recent_active_cycles:
|
||||
if cycle.action_type == "text_reply":
|
||||
consecutive_text_replies += 1
|
||||
response_text = cycle.response_info.get("response_text", [])
|
||||
formatted_response = "[空回复]" if not response_text else " ".join(response_text)
|
||||
responses_for_prompt.append(formatted_response)
|
||||
else:
|
||||
break
|
||||
if consecutive_text_replies >= 3:
|
||||
cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
|
||||
elif consecutive_text_replies == 2:
|
||||
cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'
|
||||
elif consecutive_text_replies == 1:
|
||||
cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")'
|
||||
if cycle_info_block:
|
||||
cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n"
|
||||
else:
|
||||
cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"
|
||||
|
||||
individuality = Individuality.get_instance()
|
||||
prompt_personality = individuality.get_prompt(x_person=2, level=2)
|
||||
|
||||
@@ -857,7 +859,7 @@ class PromptBuilder:
|
||||
structured_info_block=structured_info_block,
|
||||
chat_content_block=chat_content_block,
|
||||
current_mind_block=current_mind_block,
|
||||
cycle_info_block=cycle_info_block,
|
||||
cycle_info_block=cycle_info,
|
||||
action_options_text=action_options_text,
|
||||
# example_action=example_action_key,
|
||||
)
|
||||
@@ -872,7 +874,7 @@ class PromptBuilder:
|
||||
self,
|
||||
is_group_chat: bool,
|
||||
chat_target_info: Optional[dict],
|
||||
cycle_history: Deque["CycleInfo"],
|
||||
cycle_history: Deque["CycleDetail"],
|
||||
observed_messages_str: str,
|
||||
structured_info: str,
|
||||
current_available_actions: Dict[str, str],
|
||||
|
||||
44 src/plugins/heartFC_chat/hfc_utils.py Normal file
@@ -0,0 +1,44 @@
import time
import traceback
from typing import Optional
from src.plugins.chat.message import MessageRecv, BaseMessageInfo
from src.plugins.chat.chat_stream import ChatStream
from src.plugins.chat.message import UserInfo
from src.common.logger_manager import get_logger

logger = get_logger(__name__)


async def _create_empty_anchor_message(
platform: str, group_info: dict, chat_stream: ChatStream
) -> Optional[MessageRecv]:
"""
重构观察到的最后一条消息作为回复的锚点,
如果重构失败或观察为空,则创建一个占位符。
"""

try:
placeholder_id = f"mid_pf_{int(time.time() * 1000)}"
placeholder_user = UserInfo(user_id="system_trigger", user_nickname="System Trigger", platform=platform)
placeholder_msg_info = BaseMessageInfo(
message_id=placeholder_id,
platform=platform,
group_info=group_info,
user_info=placeholder_user,
time=time.time(),
)
placeholder_msg_dict = {
"message_info": placeholder_msg_info.to_dict(),
"processed_plain_text": "[System Trigger Context]",
"raw_message": "",
"time": placeholder_msg_info.time,
}
anchor_message = MessageRecv(placeholder_msg_dict)
anchor_message.update_chat_stream(chat_stream)
logger.debug(f"创建占位符锚点消息: ID={anchor_message.message_info.message_id}")
return anchor_message

except Exception as e:
logger.error(f"Error getting/creating anchor message: {e}")
logger.error(traceback.format_exc())
return None
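
# 一个最小的调用示意:platform / group_info / chat_stream 假定由调用方(例如心流聊天主循环)提供;
# 返回的占位消息 processed_plain_text 为 "[System Trigger Context]",
# heartFC_sender 中的 set_reply 条件正是据此跳过系统触发场景。
async def _demo_create_anchor(platform: str, group_info: dict, chat_stream: ChatStream) -> None:
    anchor = await _create_empty_anchor_message(platform, group_info, chat_stream)
    if anchor is not None:
        logger.debug(f"占位锚点: {anchor.message_info.message_id} -> {anchor.processed_plain_text}")
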
48 src/plugins/heartFC_chat/info_processors/base_processor.py Normal file
@@ -0,0 +1,48 @@
from abc import ABC, abstractmethod
from typing import List, Any, Optional
from src.heart_flow.info.info_base import InfoBase
from src.heart_flow.chatting_observation import Observation
from src.common.logger_manager import get_logger

logger = get_logger("base_processor")


class BaseProcessor(ABC):
"""信息处理器基类

所有具体的信息处理器都应该继承这个基类,并实现process_info方法。
支持处理InfoBase和Observation类型的输入。
"""

@abstractmethod
def __init__(self):
"""初始化处理器"""
pass

@abstractmethod
async def process_info(
self, infos: List[InfoBase], observations: Optional[List[Observation]] = None, **kwargs: Any
) -> List[InfoBase]:
"""处理信息对象的抽象方法

Args:
infos: InfoBase对象列表
observations: 可选的Observation对象列表
**kwargs: 其他可选参数

Returns:
List[InfoBase]: 处理后的InfoBase实例列表
"""
pass

def _create_processed_item(self, info_type: str, info_data: Any) -> dict:
"""创建处理后的信息项

Args:
info_type: 信息类型
info_data: 信息数据

Returns:
dict: 处理后的信息项
"""
return {"type": info_type, "id": f"info_{info_type}", "content": info_data, "ttl": 3}
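
# 一个最小的子类示意(类名与逻辑均为假设,仅演示 BaseProcessor 的接口约定):
# 原样返回传入的 InfoBase,并用 _create_processed_item 记录观察数量。
class _EchoProcessor(BaseProcessor):
    def __init__(self):
        super().__init__()

    async def process_info(
        self, infos: List[InfoBase], observations: Optional[List[Observation]] = None, **kwargs: Any
    ) -> List[InfoBase]:
        if observations:
            logger.debug(self._create_processed_item("observation_count", len(observations)))
        return list(infos)
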
@@ -0,0 +1,70 @@
|
||||
from typing import List, Optional, Any
|
||||
from src.heart_flow.info.obs_info import ObsInfo
|
||||
from src.heart_flow.chatting_observation import Observation
|
||||
from src.heart_flow.info.info_base import InfoBase
|
||||
from .base_processor import BaseProcessor
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.heart_flow.chatting_observation import ChattingObservation
|
||||
from src.heart_flow.hfcloop_observation import HFCloopObservation
|
||||
from src.heart_flow.info.cycle_info import CycleInfo
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
|
||||
class ChattingInfoProcessor(BaseProcessor):
|
||||
"""观察处理器
|
||||
|
||||
用于处理Observation对象,将其转换为ObsInfo对象。
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""初始化观察处理器"""
|
||||
super().__init__()
|
||||
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, **kwargs: Any) -> List[InfoBase]:
|
||||
"""处理Observation对象
|
||||
|
||||
Args:
|
||||
infos: InfoBase对象列表
|
||||
observations: 可选的Observation对象列表
|
||||
**kwargs: 其他可选参数
|
||||
|
||||
Returns:
|
||||
List[InfoBase]: 处理后的ObsInfo实例列表
|
||||
"""
|
||||
print(f"observations: {observations}")
|
||||
processed_infos = []
|
||||
|
||||
# 处理Observation对象
|
||||
if observations:
|
||||
for obs in observations:
|
||||
print(f"obs: {obs}")
|
||||
if isinstance(obs, ChattingObservation):
|
||||
obs_info = ObsInfo()
|
||||
|
||||
# 设置说话消息
|
||||
if hasattr(obs, "talking_message_str"):
|
||||
obs_info.set_talking_message(obs.talking_message_str)
|
||||
|
||||
# 设置截断后的说话消息
|
||||
if hasattr(obs, "talking_message_str_truncate"):
|
||||
obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate)
|
||||
|
||||
# 设置聊天类型
|
||||
is_group_chat = obs.is_group_chat
|
||||
if is_group_chat:
|
||||
chat_type = "group"
|
||||
else:
|
||||
chat_type = "private"
|
||||
obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人"))
|
||||
obs_info.set_chat_type(chat_type)
|
||||
|
||||
logger.debug(f"聊天信息处理器处理后的信息: {obs_info}")
|
||||
|
||||
processed_infos.append(obs_info)
|
||||
if isinstance(obs, HFCloopObservation):
|
||||
obs_info = CycleInfo()
|
||||
obs_info.set_observe_info(obs.observe_info)
|
||||
processed_infos.append(obs_info)
|
||||
|
||||
return processed_infos
|
||||
@@ -1,4 +1,4 @@
|
||||
from .observation import ChattingObservation
|
||||
from src.heart_flow.chatting_observation import ChattingObservation, Observation
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
@@ -6,17 +6,21 @@ import traceback
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.individuality.individuality import Individuality
|
||||
import random
|
||||
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
|
||||
from src.heart_flow.chat_state_info import ChatStateInfo
|
||||
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.plugins.utils.json_utils import safe_json_dumps
|
||||
from src.plugins.chat.chat_stream import chat_manager
|
||||
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
|
||||
import difflib
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||
from src.plugins.memory_system.Hippocampus import HippocampusManager
|
||||
import jieba
|
||||
|
||||
from .base_processor import BaseProcessor
|
||||
from src.heart_flow.info.mind_info import MindInfo
|
||||
from typing import List, Optional
|
||||
from src.heart_flow.memory_observation import MemoryObservation
|
||||
from src.heart_flow.hfcloop_observation import HFCloopObservation
|
||||
from src.plugins.heartFC_chat.info_processors.processor_utils import (
|
||||
calculate_similarity,
|
||||
calculate_replacement_probability,
|
||||
get_spark,
|
||||
)
|
||||
|
||||
logger = get_logger("sub_heartflow")
|
||||
|
||||
@@ -67,43 +71,9 @@ def init_prompt():
|
||||
Prompt(private_prompt, "sub_heartflow_prompt_private_before")
|
||||
|
||||
|
||||
def calculate_similarity(text_a: str, text_b: str) -> float:
|
||||
"""
|
||||
计算两个文本字符串的相似度。
|
||||
"""
|
||||
if not text_a or not text_b:
|
||||
return 0.0
|
||||
matcher = difflib.SequenceMatcher(None, text_a, text_b)
|
||||
return matcher.ratio()
|
||||
|
||||
|
||||
def calculate_replacement_probability(similarity: float) -> float:
|
||||
"""
|
||||
根据相似度计算替换的概率。
|
||||
规则:
|
||||
- 相似度 <= 0.4: 概率 = 0
|
||||
- 相似度 >= 0.9: 概率 = 1
|
||||
- 相似度 == 0.6: 概率 = 0.7
|
||||
- 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7)
|
||||
- 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0)
|
||||
"""
|
||||
if similarity <= 0.4:
|
||||
return 0.0
|
||||
elif similarity >= 0.9:
|
||||
return 1.0
|
||||
elif 0.4 < similarity <= 0.6:
|
||||
# p = 3.5 * s - 1.4
|
||||
probability = 3.5 * similarity - 1.4
|
||||
return max(0.0, probability)
|
||||
else: # 0.6 < similarity < 0.9
|
||||
# p = s + 0.1
|
||||
probability = similarity + 0.1
|
||||
return min(1.0, max(0.0, probability))
|
||||
|
||||
|
||||
class SubMind:
|
||||
def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: ChattingObservation):
|
||||
self.last_active_time = None
|
||||
class MindProcessor(BaseProcessor):
|
||||
def __init__(self, subheartflow_id: str):
|
||||
super().__init__()
|
||||
self.subheartflow_id = subheartflow_id
|
||||
|
||||
self.llm_model = LLMRequest(
|
||||
@@ -113,9 +83,6 @@ class SubMind:
|
||||
request_type="sub_heart_flow",
|
||||
)
|
||||
|
||||
self.chat_state = chat_state
|
||||
self.observations = observations
|
||||
|
||||
self.current_mind = ""
|
||||
self.past_mind = []
|
||||
self.structured_info = []
|
||||
@@ -153,16 +120,28 @@ class SubMind:
|
||||
self.structured_info_str = "\n".join(lines)
|
||||
logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")
|
||||
|
||||
async def do_thinking_before_reply(self, history_cycle: list[CycleInfo] = None, parallel_mode: bool = True, no_tools: bool = True, return_prompt: bool = False, cycle_info: CycleInfo = None):
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]:
|
||||
"""处理信息对象
|
||||
|
||||
Args:
|
||||
*infos: 可变数量的InfoBase类型的信息对象
|
||||
|
||||
Returns:
|
||||
List[dict]: 处理后的结构化信息列表
|
||||
"""
|
||||
current_mind = await self.do_thinking_before_reply(observations)
|
||||
|
||||
mind_info = MindInfo()
|
||||
mind_info.set_current_mind(current_mind)
|
||||
|
||||
return [mind_info]
|
||||
|
||||
async def do_thinking_before_reply(self, observations: Optional[List[Observation]] = None):
|
||||
"""
|
||||
在回复前进行思考,生成内心想法并收集工具调用结果
|
||||
|
||||
参数:
|
||||
history_cycle: 历史循环信息
|
||||
parallel_mode: 是否在并行模式下执行,默认为True
|
||||
no_tools: 是否禁用工具调用,默认为True
|
||||
return_prompt: 是否返回prompt,默认为False
|
||||
cycle_info: 循环信息对象,可用于记录详细执行信息
|
||||
observations: 观察信息
|
||||
|
||||
返回:
|
||||
如果return_prompt为False:
|
||||
@@ -170,8 +149,6 @@ class SubMind:
|
||||
如果return_prompt为True:
|
||||
tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt
|
||||
"""
|
||||
# 更新活跃时间
|
||||
self.last_active_time = time.time()
|
||||
|
||||
# ---------- 0. 更新和清理 structured_info ----------
|
||||
if self.structured_info:
|
||||
@@ -191,68 +168,25 @@ class SubMind:
|
||||
# ---------- 1. 准备基础数据 ----------
|
||||
# 获取现有想法和情绪状态
|
||||
previous_mind = self.current_mind if self.current_mind else ""
|
||||
mood_info = self.chat_state.mood
|
||||
|
||||
# 获取观察对象
|
||||
observation: ChattingObservation = self.observations[0] if self.observations else None
|
||||
if not observation or not hasattr(observation, "is_group_chat"): # Ensure it's ChattingObservation or similar
|
||||
logger.error(f"{self.log_prefix} 无法获取有效的观察对象或缺少聊天类型信息")
|
||||
self.update_current_mind("(观察出错了...)")
|
||||
return self.current_mind, self.past_mind
|
||||
|
||||
for observation in observations:
|
||||
if isinstance(observation, ChattingObservation):
|
||||
# 获取聊天元信息
|
||||
is_group_chat = observation.is_group_chat
|
||||
|
||||
chat_target_info = observation.chat_target_info
|
||||
chat_target_name = "对方" # Default for private
|
||||
chat_target_name = "对方" # 私聊默认名称
|
||||
if not is_group_chat and chat_target_info:
|
||||
# 优先使用person_name,其次user_nickname,最后回退到默认值
|
||||
chat_target_name = (
|
||||
chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name
|
||||
)
|
||||
|
||||
# 获取观察内容
|
||||
# 获取聊天内容
|
||||
chat_observe_info = observation.get_observe_info()
|
||||
person_list = observation.person_list
|
||||
|
||||
# ---------- 2. 获取记忆 ----------
|
||||
try:
|
||||
# 从聊天内容中提取关键词
|
||||
chat_words = set(jieba.cut(chat_observe_info))
|
||||
# 过滤掉停用词和单字词
|
||||
keywords = [word for word in chat_words if len(word) > 1]
|
||||
# 去重并限制数量
|
||||
keywords = list(set(keywords))[:5]
|
||||
|
||||
logger.debug(f"{self.log_prefix} 提取的关键词: {keywords}")
|
||||
# 检查已有记忆,过滤掉已存在的主题
|
||||
existing_topics = set()
|
||||
for item in self.structured_info:
|
||||
if item["type"] == "memory":
|
||||
existing_topics.add(item["id"])
|
||||
|
||||
# 过滤掉已存在的主题
|
||||
filtered_keywords = [k for k in keywords if k not in existing_topics]
|
||||
|
||||
if not filtered_keywords:
|
||||
logger.debug(f"{self.log_prefix} 所有关键词对应的记忆都已存在,跳过记忆提取")
|
||||
else:
|
||||
# 调用记忆系统获取相关记忆
|
||||
related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
|
||||
valid_keywords=filtered_keywords, max_memory_num=3, max_memory_length=2, max_depth=3
|
||||
)
|
||||
|
||||
logger.debug(f"{self.log_prefix} 获取到的记忆: {related_memory}")
|
||||
|
||||
if related_memory:
|
||||
for topic, memory in related_memory:
|
||||
new_item = {"type": "memory", "id": topic, "content": memory, "ttl": 3}
|
||||
self.structured_info.append(new_item)
|
||||
logger.debug(f"{self.log_prefix} 添加新记忆: {topic} - {memory}")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix} 没有找到相关记忆")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 获取记忆时出错: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
if isinstance(observation, MemoryObservation):
|
||||
memory_observe_info = observation.get_observe_info()
|
||||
if isinstance(observation, HFCloopObservation):
|
||||
hfcloop_observe_info = observation.get_observe_info()
|
||||
|
||||
# ---------- 3. 准备个性化数据 ----------
|
||||
# 获取个性化信息
|
||||
@@ -268,72 +202,9 @@ class SubMind:
|
||||
# 获取当前时间
|
||||
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
|
||||
|
||||
# ---------- 4. 构建思考指导部分 ----------
|
||||
# 创建本地随机数生成器,基于分钟数作为种子
|
||||
local_random = random.Random()
|
||||
current_minute = int(time.strftime("%M"))
|
||||
local_random.seed(current_minute)
|
||||
|
||||
# 思考指导选项和权重
|
||||
hf_options = [
|
||||
("可以参考之前的想法,在原来想法的基础上继续思考", 0.2),
|
||||
("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4),
|
||||
("不要太深入", 0.2),
|
||||
("进行深入思考", 0.2),
|
||||
]
|
||||
|
||||
# 准备循环信息块 (分析最近的活动循环)
|
||||
recent_active_cycles = []
|
||||
for cycle in reversed(history_cycle):
|
||||
# 只关心实际执行了动作的循环
|
||||
if cycle.action_taken:
|
||||
recent_active_cycles.append(cycle)
|
||||
# 最多找最近的3个活动循环
|
||||
if len(recent_active_cycles) == 3:
|
||||
break
|
||||
|
||||
cycle_info_block = ""
|
||||
consecutive_text_replies = 0
|
||||
responses_for_prompt = []
|
||||
|
||||
# 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看)
|
||||
for cycle in recent_active_cycles:
|
||||
if cycle.action_type == "text_reply":
|
||||
consecutive_text_replies += 1
|
||||
# 获取回复内容,如果不存在则返回'[空回复]'
|
||||
response_text = cycle.response_info.get("response_text", [])
|
||||
# 使用简单的 join 来格式化回复内容列表
|
||||
formatted_response = "[空回复]" if not response_text else " ".join(response_text)
|
||||
responses_for_prompt.append(formatted_response)
|
||||
else:
|
||||
# 一旦遇到非文本回复,连续性中断
|
||||
break
|
||||
|
||||
# 根据连续文本回复的数量构建提示信息
|
||||
# 注意: responses_for_prompt 列表是从最近到最远排序的
|
||||
if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复
|
||||
cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
|
||||
elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复
|
||||
cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'
|
||||
elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复
|
||||
cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")'
|
||||
|
||||
# 包装提示块,增加可读性,即使没有连续回复也给个标记
|
||||
if cycle_info_block:
|
||||
cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n"
|
||||
else:
|
||||
# 如果最近的活动循环不是文本回复,或者没有活动循环
|
||||
cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"
|
||||
|
||||
# 加权随机选择思考指导
|
||||
hf_do_next = local_random.choices(
|
||||
[option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1
|
||||
)[0]
|
||||
spark_prompt = get_spark()
|
||||
|
||||
# ---------- 5. 构建最终提示词 ----------
|
||||
# --- 根据聊天类型选择模板 ---
|
||||
logger.debug(f"is_group_chat: {is_group_chat}")
|
||||
|
||||
template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before"
|
||||
logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板")
|
||||
|
||||
@@ -344,31 +215,21 @@ class SubMind:
|
||||
bot_name=individuality.name,
|
||||
time_now=time_now,
|
||||
chat_observe_info=chat_observe_info,
|
||||
mood_info=mood_info,
|
||||
hf_do_next=hf_do_next,
|
||||
last_mind = previous_mind,
|
||||
cycle_info_block=cycle_info_block,
|
||||
mood_info="mood_info",
|
||||
hf_do_next=spark_prompt,
|
||||
last_mind=previous_mind,
|
||||
cycle_info_block=hfcloop_observe_info,
|
||||
chat_target_name=chat_target_name,
|
||||
)
|
||||
|
||||
# 在构建完提示词后,生成最终的prompt字符串
|
||||
final_prompt = prompt
|
||||
|
||||
# ---------- 6. 调用LLM ----------
|
||||
# 如果指定了cycle_info,记录structured_info和prompt
|
||||
if cycle_info:
|
||||
cycle_info.set_submind_info(
|
||||
prompt=final_prompt,
|
||||
structured_info=self.structured_info_str
|
||||
)
|
||||
|
||||
content = "" # 初始化内容变量
|
||||
|
||||
try:
|
||||
# 调用LLM生成响应
|
||||
response = await self.llm_model.generate_response_async(
|
||||
prompt=final_prompt
|
||||
)
|
||||
response, _ = await self.llm_model.generate_response_async(prompt=final_prompt)
|
||||
|
||||
# 直接使用LLM返回的文本响应作为 content
|
||||
content = response if response else ""
|
||||
@@ -380,15 +241,26 @@ class SubMind:
|
||||
content = "思考过程中出现错误"
|
||||
|
||||
# 记录初步思考结果
|
||||
logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {final_prompt}\n")
|
||||
logger.debug(f"{self.log_prefix} 思考prompt: \n{final_prompt}\n")
|
||||
|
||||
# 处理空响应情况
|
||||
if not content:
|
||||
content = "(不知道该想些什么...)"
|
||||
logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。")
|
||||
|
||||
# ---------- 7. 应用概率性去重和修饰 ----------
|
||||
new_content = content # 保存 LLM 直接输出的结果
|
||||
# ---------- 8. 更新思考状态并返回结果 ----------
|
||||
logger.info(f"{self.log_prefix} 思考结果: {content}")
|
||||
# 更新当前思考内容
|
||||
self.update_current_mind(content)
|
||||
|
||||
return content
|
||||
|
||||
def update_current_mind(self, response):
|
||||
if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind
|
||||
self.past_mind.append(self.current_mind)
|
||||
self.current_mind = response
|
||||
|
||||
def de_similar(self, previous_mind, new_content):
|
||||
try:
|
||||
similarity = calculate_similarity(previous_mind, new_content)
|
||||
replacement_prob = calculate_replacement_probability(similarity)
|
||||
@@ -422,7 +294,9 @@ class SubMind:
|
||||
else:
|
||||
# 相似度较高但非100%,执行标准去重逻辑
|
||||
logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...")
|
||||
logger.debug(f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}")
|
||||
logger.debug(
|
||||
f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}"
|
||||
)
|
||||
|
||||
matcher = difflib.SequenceMatcher(None, previous_mind, new_content)
|
||||
logger.debug(f"{self.log_prefix} matcher类型: {type(matcher)}")
|
||||
@@ -433,7 +307,9 @@ class SubMind:
|
||||
# 获取并记录所有匹配块
|
||||
matching_blocks = matcher.get_matching_blocks()
|
||||
logger.debug(f"{self.log_prefix} 匹配块数量: {len(matching_blocks)}")
|
||||
logger.debug(f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}")
|
||||
logger.debug(
|
||||
f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}"
|
||||
)
|
||||
|
||||
# get_matching_blocks()返回形如[(i, j, n), ...]的列表,其中i是a中的索引,j是b中的索引,n是匹配的长度
|
||||
for idx, match in enumerate(matching_blocks):
|
||||
@@ -449,9 +325,13 @@ class SubMind:
|
||||
# 确保添加的是字符串,而不是元组
|
||||
try:
|
||||
non_matching_part = new_content[last_match_end_in_b:j]
|
||||
logger.debug(f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}")
|
||||
logger.debug(
|
||||
f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}"
|
||||
)
|
||||
if not isinstance(non_matching_part, str):
|
||||
logger.warning(f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}")
|
||||
logger.warning(
|
||||
f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}"
|
||||
)
|
||||
non_matching_part = str(non_matching_part)
|
||||
deduplicated_parts.append(non_matching_part)
|
||||
except Exception as e:
|
||||
@@ -511,31 +391,7 @@ class SubMind:
|
||||
# 出错时保留原始 content
|
||||
content = new_content
|
||||
|
||||
# ---------- 8. 更新思考状态并返回结果 ----------
|
||||
logger.info(f"{self.log_prefix} 最终心流思考结果: {content}")
|
||||
# 更新当前思考内容
|
||||
self.update_current_mind(content)
|
||||
|
||||
# 在原始代码的return语句前,记录结果并根据return_prompt决定返回值
|
||||
if cycle_info:
|
||||
cycle_info.set_submind_info(
|
||||
result=content
|
||||
)
|
||||
|
||||
if return_prompt:
|
||||
return content, self.past_mind, final_prompt
|
||||
else:
|
||||
return content, self.past_mind
|
||||
|
||||
def update_current_mind(self, response):
|
||||
if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind
|
||||
self.past_mind.append(self.current_mind)
|
||||
# 可以考虑限制 past_mind 的大小,例如:
|
||||
# max_past_mind_size = 10
|
||||
# if len(self.past_mind) > max_past_mind_size:
|
||||
# self.past_mind.pop(0) # 移除最旧的
|
||||
|
||||
self.current_mind = response
|
||||
return content
|
||||
|
||||
|
||||
init_prompt()
|
||||
56 src/plugins/heartFC_chat/info_processors/processor_utils.py Normal file
@@ -0,0 +1,56 @@
import difflib
import random
import time


def calculate_similarity(text_a: str, text_b: str) -> float:
"""
计算两个文本字符串的相似度。
"""
if not text_a or not text_b:
return 0.0
matcher = difflib.SequenceMatcher(None, text_a, text_b)
return matcher.ratio()


def calculate_replacement_probability(similarity: float) -> float:
"""
根据相似度计算替换的概率。
规则:
- 相似度 <= 0.4: 概率 = 0
- 相似度 >= 0.9: 概率 = 1
- 相似度 == 0.6: 概率 = 0.7
- 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7)
- 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0)
"""
if similarity <= 0.4:
return 0.0
elif similarity >= 0.9:
return 1.0
elif 0.4 < similarity <= 0.6:
# p = 3.5 * s - 1.4
probability = 3.5 * similarity - 1.4
return max(0.0, probability)
else:  # 0.6 < similarity < 0.9
# p = s + 0.1
probability = similarity + 0.1
return min(1.0, max(0.0, probability))


def get_spark():
local_random = random.Random()
current_minute = int(time.strftime("%M"))
local_random.seed(current_minute)

hf_options = [
("可以参考之前的想法,在原来想法的基础上继续思考", 0.2),
("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4),
("不要太深入", 0.2),
("进行深入思考", 0.2),
]
# 加权随机选择思考指导
hf_do_next = local_random.choices(
[option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1
)[0]

return hf_do_next
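
# 一个最小的数值示意(按上面的分段规则验算几个点;浮点比较留有容差,数值为手算结果):
def _demo_replacement_probability() -> None:
    assert calculate_replacement_probability(0.3) == 0.0  # 相似度 <= 0.4
    assert abs(calculate_replacement_probability(0.5) - 0.35) < 1e-9  # 3.5 * 0.5 - 1.4
    assert abs(calculate_replacement_probability(0.6) - 0.7) < 1e-9  # 两段在 0.6 处衔接
    assert abs(calculate_replacement_probability(0.8) - 0.9) < 1e-9  # 0.8 + 0.1
    assert calculate_replacement_probability(0.95) == 1.0  # 相似度 >= 0.9
    # calculate_similarity 基于 difflib.SequenceMatcher.ratio(),完全相同的文本返回 1.0
    assert calculate_similarity("今天天气不错", "今天天气不错") == 1.0
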
200 src/plugins/heartFC_chat/info_processors/tool_processor.py Normal file
@@ -0,0 +1,200 @@
|
||||
from src.heart_flow.chatting_observation import ChattingObservation
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.individuality.individuality import Individuality
|
||||
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.plugins.utils.json_utils import process_llm_tool_calls
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||
from .base_processor import BaseProcessor
|
||||
from typing import List, Optional
|
||||
from src.heart_flow.chatting_observation import Observation
|
||||
from src.heart_flow.working_observation import WorkingObservation
|
||||
from src.heart_flow.info.structured_info import StructuredInfo
|
||||
|
||||
logger = get_logger("tool_use")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
# ... 原有代码 ...
|
||||
|
||||
# 添加工具执行器提示词
|
||||
tool_executor_prompt = """
|
||||
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。
|
||||
|
||||
你要在群聊中扮演以下角色:
|
||||
{prompt_personality}
|
||||
|
||||
你当前的额外信息:
|
||||
{extra_info}
|
||||
|
||||
你的心情是:{mood_info}
|
||||
|
||||
{relation_prompt}
|
||||
|
||||
群里正在进行的聊天内容:
|
||||
{chat_observe_info}
|
||||
|
||||
请仔细分析聊天内容,考虑以下几点:
|
||||
1. 内容中是否包含需要查询信息的问题
|
||||
2. 是否需要执行特定操作
|
||||
3. 是否有明确的工具使用指令
|
||||
4. 考虑用户与你的关系以及当前的对话氛围
|
||||
|
||||
如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。
|
||||
尽量只在确实必要时才使用工具。
|
||||
"""
|
||||
Prompt(tool_executor_prompt, "tool_executor_prompt")
|
||||
|
||||
|
||||
class ToolProcessor(BaseProcessor):
|
||||
def __init__(self, subheartflow_id: str):
|
||||
super().__init__()
|
||||
self.subheartflow_id = subheartflow_id
|
||||
self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
|
||||
self.llm_model = LLMRequest(
|
||||
model=global_config.llm_normal,
|
||||
max_tokens=500,
|
||||
request_type="tool_execution",
|
||||
)
|
||||
self.structured_info = []
|
||||
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]:
|
||||
"""处理信息对象
|
||||
|
||||
Args:
|
||||
*infos: 可变数量的InfoBase类型的信息对象
|
||||
|
||||
Returns:
|
||||
list: 处理后的结构化信息列表
|
||||
"""
|
||||
|
||||
if observations:
|
||||
for observation in observations:
|
||||
if isinstance(observation, ChattingObservation):
|
||||
result, used_tools, prompt = await self.execute_tools(observation)
|
||||
|
||||
# 更新WorkingObservation中的结构化信息
|
||||
for observation in observations:
|
||||
if isinstance(observation, WorkingObservation):
|
||||
for structured_info in result:
|
||||
logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}")
|
||||
observation.add_structured_info(structured_info)
|
||||
|
||||
working_infos = observation.get_observe_info()
|
||||
logger.debug(f"{self.log_prefix} 获取更新后WorkingObservation中的结构化信息: {working_infos}")
|
||||
|
||||
structured_info = StructuredInfo()
|
||||
for working_info in working_infos:
|
||||
structured_info.set_info(working_info.get("type"), working_info.get("content"))
|
||||
|
||||
return [structured_info]
|
||||
|
||||
async def execute_tools(self, observation: ChattingObservation):
|
||||
"""
|
||||
并行执行工具,返回结构化信息
|
||||
|
||||
参数:
|
||||
sub_mind: 子思维对象
|
||||
chat_target_name: 聊天目标名称,默认为"对方"
|
||||
is_group_chat: 是否为群聊,默认为False
|
||||
return_details: 是否返回详细信息,默认为False
|
||||
cycle_info: 循环信息对象,可用于记录详细执行信息
|
||||
|
||||
返回:
|
||||
如果return_details为False:
|
||||
List[Dict]: 工具执行结果的结构化信息列表
|
||||
如果return_details为True:
|
||||
Tuple[List[Dict], List[str], str]: (工具执行结果列表, 使用的工具列表, 工具执行提示词)
|
||||
"""
|
||||
tool_instance = ToolUser()
|
||||
tools = tool_instance._define_tools()
|
||||
|
||||
logger.debug(f"observation: {observation}")
|
||||
logger.debug(f"observation.chat_target_info: {observation.chat_target_info}")
|
||||
logger.debug(f"observation.is_group_chat: {observation.is_group_chat}")
|
||||
logger.debug(f"observation.person_list: {observation.person_list}")
|
||||
|
||||
is_group_chat = observation.is_group_chat
|
||||
if not is_group_chat:
|
||||
chat_target_name = (
|
||||
observation.chat_target_info.get("person_name")
|
||||
or observation.chat_target_info.get("user_nickname")
|
||||
or "对方"
|
||||
)
|
||||
else:
|
||||
chat_target_name = "群聊"
|
||||
|
||||
chat_observe_info = observation.get_observe_info()
|
||||
person_list = observation.person_list
|
||||
|
||||
# 构建关系信息
|
||||
relation_prompt = "【关系信息】\n"
|
||||
for person in person_list:
|
||||
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
|
||||
|
||||
# 获取个性信息
|
||||
individuality = Individuality.get_instance()
|
||||
prompt_personality = individuality.get_prompt(x_person=2, level=2)
|
||||
|
||||
# 获取心情信息
|
||||
mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else ""
|
||||
|
||||
# 获取时间信息
|
||||
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
|
||||
|
||||
# 构建专用于工具调用的提示词
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"tool_executor_prompt",
|
||||
extra_info="extra_structured_info",
|
||||
chat_observe_info=chat_observe_info,
|
||||
# chat_target_name=chat_target_name,
|
||||
is_group_chat=is_group_chat,
|
||||
relation_prompt=relation_prompt,
|
||||
prompt_personality=prompt_personality,
|
||||
mood_info=mood_info,
|
||||
bot_name=individuality.name,
|
||||
time_now=time_now,
|
||||
)
|
||||
|
||||
# 调用LLM,专注于工具使用
|
||||
logger.info(f"开始执行工具调用{prompt}")
|
||||
response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
|
||||
|
||||
logger.debug(f"获取到工具原始输出:\n{tool_calls}")
|
||||
# 处理工具调用和结果收集,类似于SubMind中的逻辑
|
||||
new_structured_items = []
|
||||
used_tools = [] # 记录使用了哪些工具
|
||||
|
||||
if tool_calls:
|
||||
success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
|
||||
if success and valid_tool_calls:
|
||||
for tool_call in valid_tool_calls:
|
||||
try:
|
||||
# 记录使用的工具名称
|
||||
tool_name = tool_call.get("name", "unknown_tool")
|
||||
used_tools.append(tool_name)
|
||||
|
||||
result = await tool_instance._execute_tool_call(tool_call)
|
||||
|
||||
name = result.get("type", "unknown_type")
|
||||
content = result.get("content", "")
|
||||
|
||||
logger.info(f"工具{name},获得信息:{content}")
|
||||
if result:
|
||||
new_item = {
|
||||
"type": result.get("type", "unknown_type"),
|
||||
"id": result.get("id", f"tool_exec_{time.time()}"),
|
||||
"content": result.get("content", ""),
|
||||
"ttl": 3,
|
||||
}
|
||||
new_structured_items.append(new_item)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}工具执行失败: {e}")
|
||||
|
||||
return new_structured_items, used_tools, prompt
|
||||
|
||||
|
||||
init_prompt()
|
||||
@@ -352,6 +352,9 @@ class NormalChat:
# --- 新增:处理初始高兴趣消息的私有方法 ---
async def _process_initial_interest_messages(self):
"""处理启动时存在于 interest_dict 中的高兴趣消息。"""
if not self.interest_dict:
return  # 如果 interest_dict 为 None 或空,直接返回

items_to_process = list(self.interest_dict.items())
if not items_to_process:
return  # 没有初始消息,直接返回
