remove:移除info_catcher

This commit is contained in:
SengokuCola
2025-06-09 16:08:44 +08:00
parent 97ffbe5145
commit 956af05454
13 changed files with 401 additions and 280 deletions

View File

@@ -12,7 +12,6 @@ from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
@@ -186,9 +185,6 @@ class DefaultExpressor:
# current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
# self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
# --- Determine sender_name for private chat ---
sender_name_for_prompt = "某人" # Default for group or if info unavailable
if not self.is_group_chat and self.chat_target_info:
@@ -227,14 +223,10 @@ class DefaultExpressor:
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
# logger.info(f"{self.log_prefix}\nPrompt:\n{prompt}\n---------------------------\n")
logger.info(f"想要表达:{in_mind_reply}||理由:{reason}")
logger.info(f"最终回复: {content}\n")
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
)
except Exception as llm_e:
# 精简报错信息

View File

@@ -110,7 +110,9 @@ class HeartFCSender:
message.set_reply()
logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")
# print(f"message.display_message: {message.display_message}")
await message.process()
# print(f"message.display_message: {message.display_message}")
if typing:
if has_thinking:

View File

@@ -12,7 +12,6 @@ from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
@@ -238,8 +237,6 @@ class DefaultReplyer:
# current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
# self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
reply_to = action_data.get("reply_to", "none")
@@ -286,10 +283,6 @@ class DefaultReplyer:
# logger.info(f"prompt: {prompt}")
logger.info(f"最终回复: {content}")
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
)
except Exception as llm_e:
# 精简报错信息
logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")

View File

@@ -24,6 +24,7 @@ class MessageStorage:
else:
filtered_processed_plain_text = ""
if isinstance(message, MessageSending):
display_message = message.display_message
if display_message:

View File

@@ -8,7 +8,6 @@ from src.common.logger_manager import get_logger
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.manager.mood_manager import mood_manager
from src.chat.message_receive.chat_stream import ChatStream, chat_manager
from src.chat.utils.info_catcher import info_catcher_manager
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.prompt_builder import global_prompt_manager
from .normal_chat_generator import NormalChatGenerator
@@ -277,9 +276,6 @@ class NormalChat:
logger.debug(f"[{self.stream_name}] 创建捕捉器thinking_id:{thinking_id}")
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
info_catcher.catch_decide_to_response(message)
# 如果启用planner预先修改可用actions避免在并行任务中重复调用
available_actions = None
if self.enable_planner:
@@ -373,8 +369,6 @@ class NormalChat:
if isinstance(response_set, Exception):
logger.error(f"[{self.stream_name}] 回复生成异常: {response_set}")
response_set = None
elif response_set:
info_catcher.catch_after_generate_response(timing_results["并行生成回复和规划"])
# 处理规划结果(可选,不影响回复)
if isinstance(plan_result, Exception):
@@ -414,7 +408,6 @@ class NormalChat:
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
if first_bot_msg:
info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg)
# 记录回复信息到最近回复列表中
reply_info = {
@@ -447,8 +440,6 @@ class NormalChat:
# await self._check_switch_to_focus()
pass
info_catcher.done_catch()
with Timer("处理表情包", timing_results):
await self._handle_emoji(message, response_set[0])

View File

@@ -133,6 +133,7 @@ class NormalChatExpressor:
thinking_start_time=time.time(),
reply_to=mark_head,
is_emoji=is_emoji,
display_message=display_message,
)
logger.debug(f"{self.log_prefix} 添加{response_type}类型消息: {content}")
@@ -167,6 +168,7 @@ class NormalChatExpressor:
thinking_start_time: float,
reply_to: bool = False,
is_emoji: bool = False,
display_message: str = "",
) -> MessageSending:
"""构建发送消息
@@ -197,6 +199,7 @@ class NormalChatExpressor:
reply=anchor_message if reply_to else None,
thinking_start_time=thinking_start_time,
is_emoji=is_emoji,
display_message=display_message,
)
return message_sending

View File

@@ -6,7 +6,6 @@ from src.chat.message_receive.message import MessageThinking
from src.chat.normal_chat.normal_prompt import prompt_builder
from src.chat.utils.timer_calculator import Timer
from src.common.logger_manager import get_logger
from src.chat.utils.info_catcher import info_catcher_manager
from src.person_info.person_info import person_info_manager
from src.chat.utils.utils import process_llm_response
@@ -69,7 +68,6 @@ class NormalChatGenerator:
enable_planner: bool = False,
available_actions=None,
):
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
person_id = person_info_manager.get_person_id(
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
@@ -105,9 +103,6 @@ class NormalChatGenerator:
logger.info(f"{message.processed_plain_text} 的回复:{content}")
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
)
except Exception:
logger.exception("生成回复时出错")

View File

@@ -150,7 +150,7 @@ class NormalChatPlanner:
try:
content, (reasoning_content, model_name) = await self.planner_llm.generate_response_async(prompt)
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
# logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {content}")
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
logger.info(f"{self.log_prefix}规划器模型: {model_name}")

View File

@@ -1,223 +0,0 @@
from src.config.config import global_config
from src.chat.message_receive.message import MessageRecv, MessageSending, Message
from src.common.database.database_model import Messages, ThinkingLog
import time
import traceback
from typing import List
import json
class InfoCatcher:
    """Collects per-reply diagnostic data for one reply cycle and persists it.

    One instance tracks a single "decide → think → generate → send" cycle
    (keyed by thinking_id via InfoCatcherManager) and finally writes a row
    into the ThinkingLog table via done_catch().

    NOTE: project-type annotations (MessageRecv, MessageSending, Message) are
    written as strings so defining this class does not require those imports
    at class-creation time; behavior at call sites is unchanged.
    """

    def __init__(self):
        # Chat-history snapshots around the reply cycle.
        self.chat_history = []  # context before the trigger (3x context size)
        self.chat_history_in_thinking = []  # messages that arrived while thinking
        self.chat_history_after_response = []  # messages after the reply (1x context)

        self.chat_id = ""
        self.trigger_response_text = ""
        self.response_text = ""
        self.trigger_response_time = 0
        self.trigger_response_message = None
        self.response_time = 0
        self.response_messages = []

        # Data captured when running in "heartflow" mode.
        self.heartflow_data = {
            "heart_flow_prompt": "",
            "sub_heartflow_before": "",
            "sub_heartflow_now": "",
            "sub_heartflow_after": "",
            "sub_heartflow_model": "",
            "prompt": "",
            "response": "",
            "model": "",
        }
        # Data captured when running in "reasoning" mode.
        self.reasoning_data = {"thinking_log": "", "prompt": "", "response": "", "model": ""}
        # Stage durations, in seconds.
        self.timing_results = {
            "interested_rate_time": 0,
            "sub_heartflow_observe_time": 0,
            "sub_heartflow_step_time": 0,
            "make_response_time": 0,
        }

    def catch_decide_to_response(self, message: "MessageRecv"):
        """Record the triggering message and snapshot the chat history before it."""
        self.trigger_response_message = message
        self.trigger_response_text = message.detailed_plain_text
        self.trigger_response_time = time.time()
        self.chat_id = message.chat_stream.stream_id
        self.chat_history = self.get_message_from_db_before_msg(message)

    def catch_after_observe(self, obs_duration: float):
        """Record how long the observation stage took."""
        self.timing_results["sub_heartflow_observe_time"] = obs_duration

    def catch_afer_shf_step(self, step_duration: float, past_mind, current_mind: str):
        """Record one sub-heartflow step.

        The method name keeps the historical "afer" typo because external
        callers depend on it. `past_mind` is a sequence of prior mind states.
        The original code had two identical if/else branches and indexed
        past_mind[-1] unconditionally, raising IndexError on an empty
        sequence; this is now a single guarded assignment.
        """
        self.timing_results["sub_heartflow_step_time"] = step_duration
        self.heartflow_data["sub_heartflow_before"] = past_mind[-1] if past_mind else ""
        self.heartflow_data["sub_heartflow_now"] = current_mind

    def catch_after_llm_generated(self, prompt: str, response: str, reasoning_content: str = "", model_name: str = ""):
        """Record the raw LLM exchange (prompt, response, reasoning, model)."""
        self.reasoning_data["thinking_log"] = reasoning_content
        self.reasoning_data["prompt"] = prompt
        self.reasoning_data["response"] = response
        self.reasoning_data["model"] = model_name
        self.response_text = response

    def catch_after_generate_response(self, response_duration: float):
        """Record how long response generation took."""
        self.timing_results["make_response_time"] = response_duration

    def catch_after_response(
        self, response_duration: float, response_message: "List[str]", first_bot_msg: "MessageSending"
    ):
        """Record the sent reply and the messages that arrived while thinking."""
        self.timing_results["make_response_time"] = response_duration
        self.response_time = time.time()
        self.response_messages = list(response_message)
        self.chat_history_in_thinking = self.get_message_from_db_between_msgs(
            self.trigger_response_message, first_bot_msg
        )

    @staticmethod
    def get_message_from_db_between_msgs(message_start: "Message", message_end: "Message"):
        """Return messages in the same chat strictly between the two messages'
        timestamps, newest first. Returns [] (and prints the error) on failure."""
        try:
            time_start = message_start.message_info.time
            time_end = message_end.message_info.time
            chat_id = message_start.chat_stream.stream_id
            messages_between_query = (
                Messages.select()
                .where((Messages.chat_id == chat_id) & (Messages.time > time_start) & (Messages.time < time_end))
                .order_by(Messages.time.desc())
            )
            return list(messages_between_query)
        except Exception as e:
            print(f"获取消息时出错: {str(e)}")
            print(traceback.format_exc())
            return []

    def get_message_from_db_before_msg(self, message: "MessageRecv"):
        """Return up to 3x context-size messages before *message* in its chat,
        newest first (ordered by time, filtered by message_id)."""
        message_id_val = message.message_info.message_id
        chat_id_val = message.chat_stream.stream_id
        messages_before_query = (
            Messages.select()
            .where((Messages.chat_id == chat_id_val) & (Messages.message_id < message_id_val))
            .order_by(Messages.time.desc())
            .limit(global_config.focus_chat.observation_context_size * 3)
        )
        return list(messages_before_query)

    def message_list_to_dict(self, message_list):
        """Reduce message objects/dicts to lite dicts with only time, nickname
        and processed text; items that cannot be converted are skipped."""
        result = []
        for msg_item in message_list:
            processed_msg_item = msg_item
            if not isinstance(msg_item, dict):
                processed_msg_item = self.message_to_dict(msg_item)
                if not processed_msg_item:
                    continue
            lite_message = {
                "time": processed_msg_item.get("time"),
                "user_nickname": processed_msg_item.get("user_nickname"),
                "processed_plain_text": processed_msg_item.get("processed_plain_text"),
            }
            result.append(lite_message)
        return result

    @staticmethod
    def message_to_dict(msg_obj):
        """Normalize a message record (dict, Messages row, or message object)
        into a plain dict; None for falsy input, {} for unhandled types."""
        if not msg_obj:
            return None
        if isinstance(msg_obj, dict):
            return msg_obj
        if isinstance(msg_obj, Messages):
            return {
                "time": msg_obj.time,
                "user_id": msg_obj.user_id,
                "user_nickname": msg_obj.user_nickname,
                "processed_plain_text": msg_obj.processed_plain_text,
            }
        if hasattr(msg_obj, "message_info") and hasattr(msg_obj.message_info, "user_info"):
            return {
                "time": msg_obj.message_info.time,
                "user_id": msg_obj.message_info.user_info.user_id,
                "user_nickname": msg_obj.message_info.user_info.user_nickname,
                "processed_plain_text": msg_obj.processed_plain_text,
            }
        print(f"Warning: message_to_dict received an unhandled type: {type(msg_obj)}")
        return {}

    def done_catch(self):
        """Persist everything collected so far into the thinking_log table.

        Returns True on success, False (after printing the error) on failure.
        """
        try:
            trigger_info_dict = self.message_to_dict(self.trigger_response_message)
            response_info_dict = {
                "time": self.response_time,
                "message": self.response_messages,
            }
            chat_history_list = self.message_list_to_dict(self.chat_history)
            chat_history_in_thinking_list = self.message_list_to_dict(self.chat_history_in_thinking)
            chat_history_after_response_list = self.message_list_to_dict(self.chat_history_after_response)
            log_entry = ThinkingLog(
                chat_id=self.chat_id,
                trigger_text=self.trigger_response_text,
                response_text=self.response_text,
                trigger_info_json=json.dumps(trigger_info_dict) if trigger_info_dict else None,
                response_info_json=json.dumps(response_info_dict),
                timing_results_json=json.dumps(self.timing_results),
                chat_history_json=json.dumps(chat_history_list),
                chat_history_in_thinking_json=json.dumps(chat_history_in_thinking_list),
                chat_history_after_response_json=json.dumps(chat_history_after_response_list),
                heartflow_data_json=json.dumps(self.heartflow_data),
                reasoning_data_json=json.dumps(self.reasoning_data),
            )
            log_entry.save()
            return True
        except Exception as e:
            print(f"存储思考日志时出错: {str(e)} 喵~")
            print(traceback.format_exc())
            return False
class InfoCatcherManager:
    """Registry that hands out exactly one InfoCatcher per thinking_id."""

    def __init__(self):
        # Maps thinking_id -> InfoCatcher for the lifetime of the process.
        self.info_catchers = {}

    def get_info_catcher(self, thinking_id: str) -> "InfoCatcher":
        """Return the catcher for *thinking_id*, creating it on first access."""
        catcher = self.info_catchers.get(thinking_id)
        if catcher is None:
            catcher = InfoCatcher()
            self.info_catchers[thinking_id] = catcher
        return catcher
# Module-level singleton: all chat components share this registry to look up
# the InfoCatcher for a given thinking_id.
info_catcher_manager = InfoCatcherManager()