fix:拯救大兵ruff 2
@@ -24,7 +24,9 @@ class ToolUser:
        )

    @staticmethod
-    async def _build_tool_prompt(message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None):
+    async def _build_tool_prompt(
+        message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None
+    ):
        """构建工具使用的提示词

        Args:
@@ -15,6 +15,7 @@ import enum
import os # 新增
import json # 新增
from src.plugins.chat.chat_stream import chat_manager # 新增

# --- Add imports for merged dependencies ---
from src.plugins.heartFC_chat.heartFC_generator import ResponseGenerator
from src.do_tool.tool_use import ToolUser
@@ -34,6 +35,7 @@ if TYPE_CHECKING:
    from src.heart_flow.sub_heartflow import SubHeartflow, ChatState # Keep SubHeartflow here too
    # from src.plugins.heartFC_chat.heartFC_controler import HeartFCController # No longer needed


def init_prompt():
    prompt = ""
    prompt += "你刚刚在做的事情是:{schedule_info}\n"
@@ -109,7 +111,6 @@ class MaiState(enum.Enum):

class MaiStateInfo:
    def __init__(self):

        # 使用枚举类型初始化状态,默认为正常聊天
        self.mai_status: MaiState = MaiState.OFFLINE
        self.mai_status_history = [] # 历史状态,包含 状态,最后时间
@@ -148,11 +149,13 @@ class Heartflow:
            model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
        )

-        self._subheartflows: Dict[Any, 'SubHeartflow'] = {} # Update type hint
+        self._subheartflows: Dict[Any, "SubHeartflow"] = {} # Update type hint

        # --- Dependencies moved from HeartFCController ---
        self.gpt_instance = ResponseGenerator()
-        self.mood_manager = MoodManager.get_instance() # Note: MaiStateInfo also has one, consider consolidating later if needed
+        self.mood_manager = (
+            MoodManager.get_instance()
+        ) # Note: MaiStateInfo also has one, consider consolidating later if needed
        self.tool_user_instance = ToolUser()
        self.emoji_manager_instance = emoji_manager # Module instance
        self.relationship_manager_instance = relationship_manager # Module instance
@@ -340,10 +343,15 @@ class Heartflow:

            for sub_hf in subflows_snapshot:
                # Double-check if subflow still exists and is in CHAT state
-                if sub_hf.subheartflow_id in self._subheartflows and sub_hf.chat_state.chat_status == ChatState.CHAT:
+                if (
+                    sub_hf.subheartflow_id in self._subheartflows
+                    and sub_hf.chat_state.chat_status == ChatState.CHAT
+                ):
                    evaluated_count += 1
                    if sub_hf.should_evaluate_reply():
-                        stream_name = chat_manager.get_stream_name(sub_hf.subheartflow_id) or sub_hf.subheartflow_id
+                        stream_name = (
+                            chat_manager.get_stream_name(sub_hf.subheartflow_id) or sub_hf.subheartflow_id
+                        )
                        log_prefix = f"[{stream_name}]"
                        logger.info(f"{log_prefix} 兴趣概率触发,尝试将状态从 CHAT 提升到 FOCUSED")
                        # set_chat_state handles limit checks and HeartFChatting creation internally
@@ -355,7 +363,9 @@ class Heartflow:
                    # logger.trace(f"[{sub_hf.subheartflow_id}] In CHAT state, but should_evaluate_reply returned False.")

            if evaluated_count > 0:
-                logger.debug(f"[Heartflow Interest Eval] Evaluated {evaluated_count} CHAT flows. Promoted {promoted_count} to FOCUSED.")
+                logger.debug(
+                    f"[Heartflow Interest Eval] Evaluated {evaluated_count} CHAT flows. Promoted {promoted_count} to FOCUSED."
+                )

        except Exception as e:
            logger.error(f"[Heartflow] 兴趣评估任务出错: {e}")
@@ -431,7 +441,6 @@ class Heartflow:
            # logger.info(f"[Heartflow] 清理完成。没有流符合移除条件。当前数量: {initial_count}") # 减少日志噪音
            pass


    async def heartflow_start_working(self):
        # 启动清理任务 (使用新的 periodic_cleanup_task)
        if self._cleanup_task is None or self._cleanup_task.done():
@@ -593,7 +602,7 @@ class Heartflow:
            return "(想法汇总时发生错误...)"

    # --- Add helper method to count subflows by state --- #
-    def count_subflows_by_state(self, target_state: 'ChatState') -> int:
+    def count_subflows_by_state(self, target_state: "ChatState") -> int:
        """Counts the number of subheartflows currently in the specified state."""
        count = 0
        # Use items() directly for read-only iteration if thread safety isn't a major concern here
@@ -604,9 +613,10 @@ class Heartflow:
            if flow.subheartflow_id in self._subheartflows and flow.chat_state.chat_status == target_state:
                count += 1
        return count

    # --- End helper method --- #

-    async def create_subheartflow(self, subheartflow_id: Any) -> Optional['SubHeartflow']:
+    async def create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]:
        """
        获取或创建一个新的SubHeartflow实例。
        创建本身不受限,因为初始状态是ABSENT。
@@ -627,7 +637,6 @@ class Heartflow:
            await observation.initialize()
            subheartflow.add_observation(observation)


            # 创建并存储后台任务 (SubHeartflow 自己的后台任务)
            subheartflow.task = asyncio.create_task(subheartflow.subheartflow_start_working())
            logger.debug(f"[Heartflow] 为 {subheartflow_id} 创建后台任务成功,添加 observation 成功")
@@ -641,7 +650,7 @@ class Heartflow:
            logger.error(traceback.format_exc())
            return None

-    def get_subheartflow(self, observe_chat_id: Any) -> Optional['SubHeartflow']:
+    def get_subheartflow(self, observe_chat_id: Any) -> Optional["SubHeartflow"]:
        """获取指定ID的SubHeartflow实例"""
        return self._subheartflows.get(observe_chat_id)

@@ -692,7 +701,9 @@ class Heartflow:
        """根据当前的 MaiState 强制执行 SubHeartflow 数量限制"""
        normal_limit = current_mai_state.get_normal_chat_max_num()
        focused_limit = current_mai_state.get_focused_chat_max_num()
-        logger.debug(f"[Heartflow Limits] 执行限制检查。当前状态: {current_mai_state.value}, Normal上限: {normal_limit}, Focused上限: {focused_limit}")
+        logger.debug(
+            f"[Heartflow Limits] 执行限制检查。当前状态: {current_mai_state.value}, Normal上限: {normal_limit}, Focused上限: {focused_limit}"
+        )

        # 分类并统计当前 subheartflows
        normal_flows = []
@@ -713,7 +724,9 @@ class Heartflow:
            else:
                other_flows.append((flow_id, flow.last_active_time))

-        logger.debug(f"[Heartflow Limits] 当前计数 - Normal: {len(normal_flows)}, Focused: {len(focused_flows)}, Other: {len(other_flows)}")
+        logger.debug(
+            f"[Heartflow Limits] 当前计数 - Normal: {len(normal_flows)}, Focused: {len(focused_flows)}, Other: {len(other_flows)}"
+        )

        stopped_count = 0

@@ -726,7 +739,9 @@ class Heartflow:
            # 停止最不活跃的超额部分
            for i in range(excess_count):
                flow_id_to_stop = normal_flows[i][0]
-                if await self._stop_subheartflow(flow_id_to_stop, f"Normal (CHAT) 状态超出上限 ({normal_limit}),停止最不活跃的实例"):
+                if await self._stop_subheartflow(
+                    flow_id_to_stop, f"Normal (CHAT) 状态超出上限 ({normal_limit}),停止最不活跃的实例"
+                ):
                    stopped_count += 1

        # 重新获取 focused_flows 列表,因为上面的停止操作可能已经改变了状态或移除了实例
@@ -741,17 +756,23 @@ class Heartflow:
        # 检查 Focused (FOCUSED) 限制
        if len(focused_flows) > focused_limit:
            excess_count = len(focused_flows) - focused_limit
-            logger.info(f"[Heartflow Limits] 检测到 Focused (FOCUSED) 状态超额 {excess_count} 个。上限: {focused_limit}")
+            logger.info(
+                f"[Heartflow Limits] 检测到 Focused (FOCUSED) 状态超额 {excess_count} 个。上限: {focused_limit}"
+            )
            # 按 last_active_time 升序排序
            focused_flows.sort(key=lambda item: item[1])
            # 停止最不活跃的超额部分
            for i in range(excess_count):
                flow_id_to_stop = focused_flows[i][0]
-                if await self._stop_subheartflow(flow_id_to_stop, f"Focused (FOCUSED) 状态超出上限 ({focused_limit}),停止最不活跃的实例"):
+                if await self._stop_subheartflow(
+                    flow_id_to_stop, f"Focused (FOCUSED) 状态超出上限 ({focused_limit}),停止最不活跃的实例"
+                ):
                    stopped_count += 1

        if stopped_count > 0:
-            logger.info(f"[Heartflow Limits] 限制执行完成,共停止了 {stopped_count} 个子心流。当前总数: {len(self._subheartflows)}")
+            logger.info(
+                f"[Heartflow Limits] 限制执行完成,共停止了 {stopped_count} 个子心流。当前总数: {len(self._subheartflows)}"
+            )
        else:
            logger.debug(f"[Heartflow Limits] 限制检查完成,无需停止子心流。当前总数: {len(self._subheartflows)}")

@@ -765,7 +786,11 @@ class Heartflow:

        # 使用快照进行迭代
        all_flows_snapshot = list(self._subheartflows.values())
-        absent_flows = [flow for flow in all_flows_snapshot if flow.subheartflow_id in self._subheartflows and flow.chat_state.chat_status == ChatState.ABSENT]
+        absent_flows = [
+            flow
+            for flow in all_flows_snapshot
+            if flow.subheartflow_id in self._subheartflows and flow.chat_state.chat_status == ChatState.ABSENT
+        ]

        num_to_activate = min(limit, len(absent_flows))

@@ -773,13 +798,18 @@ class Heartflow:
            logger.info(f"[Heartflow Activate] 没有处于 ABSENT 状态的子心流可供激活至 CHAT (上限: {limit})。")
            return

-        logger.info(f"[Heartflow Activate] 将随机选择 {num_to_activate} 个 (上限 {limit}) ABSENT 子心流激活至 CHAT 状态。")
+        logger.info(
+            f"[Heartflow Activate] 将随机选择 {num_to_activate} 个 (上限 {limit}) ABSENT 子心流激活至 CHAT 状态。"
+        )
        selected_flows = random.sample(absent_flows, num_to_activate)

        activated_count = 0
        for flow in selected_flows:
            # 再次检查 flow 是否仍然存在且状态为 ABSENT (以防并发修改)
-            if flow.subheartflow_id in self._subheartflows and self._subheartflows[flow.subheartflow_id].chat_state.chat_status == ChatState.ABSENT:
+            if (
+                flow.subheartflow_id in self._subheartflows
+                and self._subheartflows[flow.subheartflow_id].chat_state.chat_status == ChatState.ABSENT
+            ):
                stream_name = chat_manager.get_stream_name(flow.subheartflow_id) or flow.subheartflow_id
                logger.debug(f"[Heartflow Activate] 正在将子心流 {stream_name} 状态设置为 CHAT。")
                # 调用 set_chat_state,它内部会处理日志记录
@@ -835,11 +865,14 @@ class Heartflow:
                logger.error(f"[Heartflow Deactivate] 停用子心流 {stream_name} 时出错: {e}")
                logger.error(traceback.format_exc())

-            logger.info(f"[Heartflow Deactivate] 完成停用,共将 {deactivated_count} 个子心流设置为 ABSENT 状态 (不包括已是 ABSENT 的)。")
+            logger.info(
+                f"[Heartflow Deactivate] 完成停用,共将 {deactivated_count} 个子心流设置为 ABSENT 状态 (不包括已是 ABSENT 的)。"
+            )
        except Exception as e:
            logger.error(f"[Heartflow Deactivate] 停用所有子心流时出错: {e}")
            logger.error(traceback.format_exc())


init_prompt()
# 创建一个全局的管理器实例
heartflow = Heartflow()
@@ -137,7 +137,6 @@ class ChattingObservation(Observation):
        )
        self.mid_memory_info = mid_memory_str


        self.talking_message_str = await build_readable_messages(messages=self.talking_message, timestamp_mode="normal")

        logger.trace(
@@ -92,7 +92,7 @@ class InterestChatting:
        increase_rate=probability_increase_rate_per_second,
        decay_factor=global_config.probability_decay_factor_per_second,
        max_probability=max_reply_probability,
-        state_change_callback: Optional[Callable[[ChatState], None]] = None
+        state_change_callback: Optional[Callable[[ChatState], None]] = None,
    ):
        self.interest_level: float = 0.0
        self.last_update_time: float = time.time()
@@ -231,7 +231,7 @@ class InterestChatting:


class SubHeartflow:
-    def __init__(self, subheartflow_id, parent_heartflow: 'Heartflow'):
+    def __init__(self, subheartflow_id, parent_heartflow: "Heartflow"):
        """子心流初始化函数

        Args:
@@ -257,7 +257,7 @@ class SubHeartflow:
        self.is_active = False # 是否活跃标志
        self.should_stop = False # 停止标志
        self.task: Optional[asyncio.Task] = None # 后台任务
-        self.heart_fc_instance: Optional['HeartFChatting'] = None # <-- Add instance variable
+        self.heart_fc_instance: Optional["HeartFChatting"] = None # <-- Add instance variable

        # 观察和知识系统
        self.observations: List[ChattingObservation] = [] # 观察列表
@@ -271,7 +271,7 @@ class SubHeartflow:
            request_type="sub_heart_flow",
        )

-    async def set_chat_state(self, new_state: 'ChatState'):
+    async def set_chat_state(self, new_state: "ChatState"):
        """更新sub_heartflow的聊天状态,并管理 HeartFChatting 实例"""

        current_state = self.chat_state.chat_status
@@ -288,10 +288,14 @@ class SubHeartflow:
                current_chat_count = self.parent_heartflow.count_subflows_by_state(ChatState.CHAT)

                if current_chat_count >= normal_limit:
-                    logger.debug(f"{log_prefix} 拒绝从 {current_state.value} 转换到 CHAT。原因:CHAT 状态已达上限 ({normal_limit})。当前数量: {current_chat_count}")
+                    logger.debug(
+                        f"{log_prefix} 拒绝从 {current_state.value} 转换到 CHAT。原因:CHAT 状态已达上限 ({normal_limit})。当前数量: {current_chat_count}"
+                    )
                    return # Block the state transition
                else:
-                    logger.debug(f"{log_prefix} 允许从 {current_state.value} 转换到 CHAT (上限: {normal_limit}, 当前: {current_chat_count})" )
+                    logger.debug(
+                        f"{log_prefix} 允许从 {current_state.value} 转换到 CHAT (上限: {normal_limit}, 当前: {current_chat_count})"
+                    )
                # If transitioning out of FOCUSED, shut down HeartFChatting first
                if current_state == ChatState.FOCUSED and self.heart_fc_instance:
                    logger.info(f"{log_prefix} 从 FOCUSED 转换到 CHAT,正在关闭 HeartFChatting...")
@@ -304,10 +308,14 @@ class SubHeartflow:
                current_focused_count = self.parent_heartflow.count_subflows_by_state(ChatState.FOCUSED)

                if current_focused_count >= focused_limit:
-                    logger.debug(f"{log_prefix} 拒绝从 {current_state.value} 转换到 FOCUSED。原因:FOCUSED 状态已达上限 ({focused_limit})。当前数量: {current_focused_count}")
+                    logger.debug(
+                        f"{log_prefix} 拒绝从 {current_state.value} 转换到 FOCUSED。原因:FOCUSED 状态已达上限 ({focused_limit})。当前数量: {current_focused_count}"
+                    )
                    return # Block the state transition
                else:
-                    logger.debug(f"{log_prefix} 允许从 {current_state.value} 转换到 FOCUSED (上限: {focused_limit}, 当前: {current_focused_count})" )
+                    logger.debug(
+                        f"{log_prefix} 允许从 {current_state.value} 转换到 FOCUSED (上限: {focused_limit}, 当前: {current_focused_count})"
+                    )
                if not self.heart_fc_instance:
                    logger.info(f"{log_prefix} 状态转为 FOCUSED,创建并初始化 HeartFChatting 实例...")
                    try:
@@ -323,7 +331,9 @@ class SubHeartflow:
                        await self.heart_fc_instance.add_time()
                        logger.info(f"{log_prefix} HeartFChatting 实例已创建并启动。")
                    else:
-                        logger.error(f"{log_prefix} HeartFChatting 实例初始化失败,状态回滚到 {current_state.value}")
+                        logger.error(
+                            f"{log_prefix} HeartFChatting 实例初始化失败,状态回滚到 {current_state.value}"
+                        )
                        self.heart_fc_instance = None
                        return # Prevent state change if HeartFChatting fails to init
                except Exception as e:
@@ -341,13 +351,11 @@ class SubHeartflow:
                await self.heart_fc_instance.shutdown()
                self.heart_fc_instance = None


        # --- Update state and timestamp if transition is allowed --- # 更新状态必须放在所有检查和操作之后
        self.chat_state.chat_status = new_state
        self.last_active_time = time.time()
        logger.info(f"{log_prefix} 聊天状态从 {current_state.value} 变更为 {new_state.value}")


    async def subheartflow_start_working(self):
        while True:
            if self.should_stop:
@@ -4,6 +4,7 @@ import time
from typing import Dict, List, Optional, Union

from src.common.logger import get_module_logger

# from ...common.database import db # 数据库依赖似乎不需要了,注释掉
from ..message.api import global_api
from .message import MessageSending, MessageThinking, MessageSet
@@ -118,7 +119,7 @@ class MessageContainer:
        earliest_message = None
        for msg in self.messages:
            # 确保消息有 thinking_start_time 属性
-            msg_time = getattr(msg, 'thinking_start_time', float('inf'))
+            msg_time = getattr(msg, "thinking_start_time", float("inf"))
            if msg_time < earliest_time:
                earliest_time = msg_time
                earliest_message = msg
@@ -172,7 +173,7 @@ class MessageManager:
    async def start(self):
        """启动后台处理器任务。"""
        # 检查是否已有任务在运行,避免重复启动
-        if hasattr(self, '_processor_task') and not self._processor_task.done():
+        if hasattr(self, "_processor_task") and not self._processor_task.done():
            logger.warning("Processor task already running.")
            return
        self._processor_task = asyncio.create_task(self._start_processor_loop())
@@ -181,13 +182,12 @@ class MessageManager:
    def stop(self):
        """停止后台处理器任务。"""
        self._running = False
-        if hasattr(self, '_processor_task') and not self._processor_task.done():
+        if hasattr(self, "_processor_task") and not self._processor_task.done():
            self._processor_task.cancel()
            logger.info("MessageManager processor task stopping.")
        else:
            logger.info("MessageManager processor task not running or already stopped.")


    async def get_container(self, chat_id: str) -> MessageContainer:
        """获取或创建聊天流的消息容器 (异步,使用锁)"""
        async with self._container_lock:
@@ -211,7 +211,7 @@ class MessageManager:
        if container and container.has_messages():
            for message in container.get_all_messages():
                if isinstance(message, MessageSending):
-                    msg_id = getattr(message.message_info, 'message_id', None)
+                    msg_id = getattr(message.message_info, "message_id", None)
                    # 检查 message_id 是否匹配 thinking_id 或以 "me" 开头 (emoji)
                    if msg_id == thinking_id or (msg_id and msg_id.startswith("me")):
                        # logger.debug(f"检查到存在相同thinking_id或emoji的消息: {msg_id} for {thinking_id}")
@@ -235,7 +235,9 @@ class MessageManager:
                    and (thinking_messages_count > 4 or thinking_messages_length > 250)
                    and not message.is_private_message()
                ):
-                    logger.debug(f"[{message.chat_stream.stream_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")
+                    logger.debug(
+                        f"[{message.chat_stream.stream_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}..."
+                    )
                    message.set_reply()
                # --- 结束条件 set_reply ---

@@ -250,14 +252,15 @@ class MessageManager:
                        # logger.debug(f"[{message.chat_stream.stream_id}] Sent and removed message: {message.message_info.message_id}")

                    except Exception as e:
-                        logger.error(f"[{message.chat_stream.stream_id}] 处理发送消息 {getattr(message.message_info, 'message_id', 'N/A')} 时出错: {e}")
+                        logger.error(
+                            f"[{message.chat_stream.stream_id}] 处理发送消息 {getattr(message.message_info, 'message_id', 'N/A')} 时出错: {e}"
+                        )
                        logger.exception("详细错误信息:")
                        # 考虑是否移除出错的消息,防止无限循环
                        removed = container.remove_message(message)
                        if removed:
                            logger.warning(f"[{message.chat_stream.stream_id}] 已移除处理出错的消息。")


    async def _process_chat_messages(self, chat_id: str):
        """处理单个聊天流消息 (合并后的逻辑)"""
        container = await self.get_container(chat_id) # 获取容器是异步的了
@@ -282,7 +285,9 @@ class MessageManager:

                # 检查是否超时
                if thinking_time > global_config.thinking_timeout:
-                    logger.warning(f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}")
+                    logger.warning(
+                        f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}"
+                    )
                    container.remove_message(message_earliest)
                    print() # 超时后换行,避免覆盖下一条日志

@@ -308,7 +313,6 @@ class MessageManager:
                # logger.debug(f"[{chat_id}] 容器已空,准备移除。")
                # del self.containers[chat_id]


    async def _start_processor_loop(self):
        """消息处理器主循环"""
        while self._running:
@@ -337,6 +341,7 @@ class MessageManager:
                    break # 退出循环
        logger.info("MessageManager processor loop finished.")


# --- 创建全局实例 ---
message_manager = MessageManager()
message_sender = MessageSender()
@@ -13,6 +13,7 @@ from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
from src.plugins.utils.timer_calculater import Timer # <--- Import Timer

# --- Import necessary dependencies directly ---
from .heartFC_generator import ResponseGenerator # Assuming this is the type for gpt
from src.do_tool.tool_use import ToolUser
@@ -74,7 +75,8 @@ class HeartFChatting:
    现在由其关联的 SubHeartflow 管理生命周期。
    """

-    def __init__(self,
+    def __init__(
+        self,
        chat_id: str,
        # 显式依赖注入
        gpt_instance: ResponseGenerator, # 文本回复生成器
@@ -145,6 +147,7 @@ class HeartFChatting:

        # <-- 在这里导入 heartflow 实例
        from src.heart_flow.heartflow import heartflow

        self.sub_hf = heartflow.get_subheartflow(self.stream_id)
        if not self.sub_hf:
            logger.warning(f"{log_prefix} 获取SubHeartflow失败。一些功能可能受限。")
@@ -391,7 +394,9 @@ class HeartFChatting:
                        logger.info(f"{log_prefix} 等待新消息的 sleep 被中断。")
                        raise # 重新抛出取消错误,以便外层循环处理
                else:
-                    logger.warning(f"{log_prefix} HeartFChatting: 无法获取 Observation 实例,无法等待新消息。")
+                    logger.warning(
+                        f"{log_prefix} HeartFChatting: 无法获取 Observation 实例,无法等待新消息。"
+                    )
                # --- 等待结束 ---

            elif action == "error": # Action specifically set to error by planner
@@ -413,9 +418,7 @@ class HeartFChatting:
                    timer_strings.append(f"{name}: {formatted_time}")

                if timer_strings: # 如果有有效计时器数据才打印
-                    logger.debug(
-                        f"{log_prefix} 该次决策耗时: {'; '.join(timer_strings)}"
-                    )
+                    logger.debug(f"{log_prefix} 该次决策耗时: {'; '.join(timer_strings)}")

                # --- Timer Decrement --- #
                cycle_duration = time.monotonic() - loop_cycle_start_time
@@ -484,8 +487,12 @@ class HeartFChatting:
        self.sub_hf = heartflow.get_subheartflow(self.stream_id)
        if not self.sub_hf:
            logger.error(f"{log_prefix}[Planner] SubHeartflow is not available. Cannot proceed.")
-            return {"action": "error", "reasoning": "SubHeartflow unavailable", "llm_error": True, "observed_messages": []}

+            return {
+                "action": "error",
+                "reasoning": "SubHeartflow unavailable",
+                "llm_error": True,
+                "observed_messages": [],
+            }

        try:
            # Access observation via self.sub_hf
@@ -503,9 +510,7 @@ class HeartFChatting:
            # --- (Moved from _replier_work) 1. 思考前使用工具 --- #
            try:
                # Access tool_user directly
-                tool_result = await self.tool_user.use_tool(
-                    message_txt=observed_messages_str, sub_heartflow=self.sub_hf
-                )
+                tool_result = await self.tool_user.use_tool(message_txt=observed_messages_str, sub_heartflow=self.sub_hf)
                if tool_result.get("used_tools", False):
                    tool_result_info = tool_result.get("structured_info", {})
                    logger.debug(f"{log_prefix}[Planner] 规划前工具结果: {tool_result_info}")
@@ -620,7 +625,6 @@ class HeartFChatting:
        """

        try:

            # --- Create Placeholder --- #
            placeholder_id = f"mid_pf_{int(time.time() * 1000)}"
            placeholder_user = UserInfo(
@@ -11,6 +11,7 @@ from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ..utils.timer_calculater import Timer

from src.plugins.moods.moods import MoodManager

# 定义日志配置
llm_config = LogConfig(
    # 使用消息发送专用样式
@@ -78,7 +79,6 @@ class ResponseGenerator:

        sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"


        with Timer() as t_build_prompt:
            prompt = await prompt_builder.build_prompt(
                build_mode="focus",
@@ -86,7 +86,7 @@ class ResponseGenerator:
                current_mind_info=current_mind_info,
                message_txt=message.processed_plain_text,
                sender_name=sender_name,
-                chat_stream=message.chat_stream
+                chat_stream=message.chat_stream,
            )
        logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
@@ -77,23 +77,18 @@ class PromptBuilder:
        self.prompt_built = ""
        self.activate_messages = ""


    async def build_prompt(
-        self, build_mode,reason,current_mind_info, message_txt: str, sender_name: str = "某人",chat_stream=None
+        self, build_mode, reason, current_mind_info, message_txt: str, sender_name: str = "某人", chat_stream=None
    ) -> tuple[str, str]:

        if build_mode == "normal":
            return await self._build_prompt_normal(chat_stream, message_txt, sender_name)

        elif build_mode == "focus":
            return await self._build_prompt_focus(reason, current_mind_info, chat_stream, message_txt, sender_name)



    async def _build_prompt_focus(
        self, reason, current_mind_info, chat_stream, message_txt: str, sender_name: str = "某人"
    ) -> tuple[str, str]:

        individuality = Individuality.get_instance()
        prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
        prompt_identity = individuality.get_prompt(type="identity", x_person=2, level=1)
@@ -107,8 +102,8 @@ class PromptBuilder:
            chat_in_group = False

        message_list_before_now = get_raw_msg_before_timestamp_with_chat(
-            chat_id =chat_stream.stream_id,
-            timestamp = time.time(),
+            chat_id=chat_stream.stream_id,
+            timestamp=time.time(),
            limit=global_config.MAX_CONTEXT_SIZE,
        )

@@ -147,7 +142,6 @@ class PromptBuilder:
        if random.random() < 0.02:
            prompt_ger += "你喜欢用反问句"


        logger.debug("开始构建prompt")

        prompt = await global_prompt_manager.format_prompt(
@@ -176,11 +170,7 @@ class PromptBuilder:

        return prompt



-    async def _build_prompt_normal(
-        self, chat_stream, message_txt: str, sender_name: str = "某人"
-    ) -> tuple[str, str]:
+    async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> tuple[str, str]:
        # 开始构建prompt
        prompt_personality = "你"
        # person
@@ -248,8 +238,8 @@ class PromptBuilder:
            chat_in_group = False

        message_list_before_now = get_raw_msg_before_timestamp_with_chat(
-            chat_id =chat_stream.stream_id,
-            timestamp = time.time(),
+            chat_id=chat_stream.stream_id,
+            timestamp=time.time(),
            limit=global_config.MAX_CONTEXT_SIZE,
        )

@@ -261,8 +251,6 @@ class PromptBuilder:
            read_mark=0.0,
        )



        # 关键词检测与反应
        keywords_reaction_prompt = ""
        for rule in global_config.keywords_reaction_rules:
@@ -304,7 +292,7 @@ class PromptBuilder:

        logger.debug("开始构建prompt")

-        schedule_prompt=await global_prompt_manager.format_prompt(
+        schedule_prompt = await global_prompt_manager.format_prompt(
            "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
        )

@@ -558,6 +546,5 @@ class PromptBuilder:
        return "\n".join(str(result["content"]) for result in results)



init_prompt()
prompt_builder = PromptBuilder()
@@ -22,6 +22,7 @@ from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculater import Timer
from src.heart_flow.heartflow import heartflow
from src.heart_flow.sub_heartflow import ChatState

# 定义日志配置
chat_config = LogConfig(
    console_format=CHAT_STYLE_CONFIG["console_format"],
@@ -129,7 +130,7 @@ class NormalChat:
            is_head=not mark_head,
            is_emoji=False,
            thinking_start_time=thinking_start_time,
-            apply_set_reply_logic=True
+            apply_set_reply_logic=True,
        )
        if not mark_head:
            mark_head = True
@@ -165,7 +166,7 @@ class NormalChat:
            reply=message,
            is_head=False,
            is_emoji=True,
-            apply_set_reply_logic=True
+            apply_set_reply_logic=True,
        )
        await message_manager.add_message(bot_message)
@@ -9,6 +9,7 @@ from ..utils.timer_calculater import Timer
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.heart_flow.sub_heartflow import SubHeartflow

# 定义日志配置
llm_config = LogConfig(
    # 使用消息发送专用样式
@@ -40,7 +41,9 @@ class ResponseGenerator:
        self.current_model_type = "r1" # 默认使用 R1
        self.current_model_name = "unknown model"

-    async def generate_response(self, sub_hf: SubHeartflow, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
+    async def generate_response(
+        self, sub_hf: SubHeartflow, message: MessageThinking, thinking_id: str
+    ) -> Optional[Union[str, List[str]]]:
        """根据当前模型类型选择对应的生成函数"""
        # 从global_config中获取模型概率值并选择模型
        if random.random() < global_config.model_reasoning_probability:
@@ -67,7 +70,9 @@ class ResponseGenerator:
            logger.info(f"{self.current_model_type}思考,失败")
            return None

-    async def _generate_response_with_model(self, sub_hf: SubHeartflow, message: MessageThinking, model: LLMRequest, thinking_id: str):
+    async def _generate_response_with_model(
+        self, sub_hf: SubHeartflow, message: MessageThinking, model: LLMRequest, thinking_id: str
+    ):
        info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

        if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
@@ -85,7 +90,7 @@ class ResponseGenerator:
        with Timer() as t_build_prompt:
            prompt = await prompt_builder.build_prompt(
                build_mode="normal",
-                reason= "",
+                reason="",
                current_mind_info="",
                message_txt=message.processed_plain_text,
                sender_name=sender_name,
@@ -108,7 +113,6 @@ class ResponseGenerator:

        return content


    async def _get_emotion_tags(self, content: str, processed_plain_text: str):
        """提取情感标签,结合立场和情绪"""
        try:
@@ -315,4 +315,3 @@ async def build_readable_messages(
    else:
        # 理论上不应该发生,但作为保险
        return read_mark_line.strip() # 如果前后都无消息,只返回标记行