refactor(chat): 重构HeartFChatting为模块化架构

将原本超长的heartFC_chat.py拆分为6个功能内聚的子模块:
- hfc_context:上下文数据容器
- cycle_tracker:循环状态记录
- energy_manager:能量值独立管理
- proactive_thinker:主动思考逻辑抽离
- cycle_processor:单次循环处理器
- response_handler / normal_mode_handler:响应策略

删除冗余常量、错误样板及旧逻辑;大幅减少类体积;降低耦合度,提升可维护性。
This commit is contained in:
minecraft1024a
2025-08-21 14:27:12 +08:00
parent 43563925e8
commit 9c3b750328
9 changed files with 927 additions and 1199 deletions

View File

@@ -0,0 +1,251 @@
import asyncio
import time
import traceback
from typing import Optional, Dict, Any
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.utils.timer_calculator import Timer
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.plugin_system.core import events_manager
from src.plugin_system.base.component_types import EventType, ChatMode
from src.mais4u.mai_think import mai_thinking_manager
from src.mais4u.constant_s4u import ENABLE_S4U
from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
from .hfc_context import HfcContext
from .response_handler import ResponseHandler
from .cycle_tracker import CycleTracker
logger = get_logger("hfc.processor")
class CycleProcessor:
    """Runs a single think/act cycle for one chat.

    A cycle: (optionally) plan an action, generate/send a reply, execute any
    non-reply action, then record the outcome through ``CycleTracker``.
    """

    def __init__(self, context: "HfcContext", response_handler: "ResponseHandler", cycle_tracker: "CycleTracker"):
        self.context = context
        self.response_handler = response_handler
        self.cycle_tracker = cycle_tracker
        self.action_planner = ActionPlanner(chat_id=self.context.stream_id, action_manager=self.context.action_manager)
        self.action_modifier = ActionModifier(action_manager=self.context.action_manager, chat_id=self.context.stream_id)

    async def observe(self, message_data: Optional[Dict[str, Any]] = None) -> bool:
        """Execute one full cycle for ``message_data`` (may be empty for idle ticks).

        Always returns True; failures are logged, not raised to the caller.
        """
        if not message_data:
            message_data = {}
        cycle_timers, thinking_id = self.cycle_tracker.start_cycle()
        logger.info(f"{self.context.log_prefix} 开始第{self.context.cycle_counter}次思考[模式:{self.context.loop_mode}]")
        if ENABLE_S4U:
            await send_typing()
        try:
            await self._run_cycle(message_data, cycle_timers, thinking_id)
        finally:
            # Fix: the original skipped stop_typing() when planning/reply raised,
            # leaving the typing indicator stuck on.
            if ENABLE_S4U:
                await stop_typing()
        return True

    async def _run_cycle(self, message_data: Dict[str, Any], cycle_timers: Dict[str, float], thinking_id: str):
        """Core of one cycle: choose an action and dispatch it."""
        loop_start_time = time.time()
        try:
            await self.action_modifier.modify_actions()
            available_actions = self.context.action_manager.get_using_actions()
        except Exception as e:
            # Best-effort: fall back to no actions rather than aborting the cycle.
            logger.error(f"{self.context.log_prefix} 动作修改失败: {e}")
            available_actions = {}

        # A mention of the bot in FOCUS mode must not be answered with no_reply.
        is_mentioned_bot = message_data.get("is_mentioned", False)
        at_bot_mentioned = (global_config.chat.mentioned_bot_inevitable_reply and is_mentioned_bot) or \
            (global_config.chat.at_bot_inevitable_reply and is_mentioned_bot)
        if self.context.loop_mode == ChatMode.FOCUS and at_bot_mentioned and "no_reply" in available_actions:
            available_actions = {k: v for k, v in available_actions.items() if k != "no_reply"}

        # In NORMAL mode with only reply-ish actions available there is nothing
        # to plan: go straight to a reply.
        skip_planner = False
        plan_result: Dict[str, Any] = {}
        target_message: Optional[Dict[str, Any]] = message_data
        if self.context.loop_mode == ChatMode.NORMAL:
            non_reply_actions = {k: v for k, v in available_actions.items() if k not in ["reply", "no_reply", "no_action"]}
            if not non_reply_actions:
                skip_planner = True
                plan_result = self._get_direct_reply_plan(loop_start_time)

        # In NORMAL mode, start generating the reply concurrently with planning.
        gen_task = None
        if not skip_planner and self.context.loop_mode == ChatMode.NORMAL:
            reply_to_str = await self._build_reply_to_str(message_data)
            gen_task = asyncio.create_task(
                self.response_handler.generate_response(
                    message_data=message_data,
                    available_actions=available_actions,
                    reply_to=reply_to_str,
                    request_type="chat.replyer.normal",
                )
            )
        if not skip_planner:
            plan_result, target_message = await self.action_planner.plan(mode=self.context.loop_mode)

        # Defensive unpacking: the planner may return malformed structures.
        action_result = plan_result.get("action_result", {}) if isinstance(plan_result, dict) else {}
        if not isinstance(action_result, dict):
            action_result = {}
        action_type = action_result.get("action_type", "error")
        action_data = action_result.get("action_data", {})
        reasoning = action_result.get("reasoning", "未提供理由")
        is_parallel = action_result.get("is_parallel", True)
        action_data["loop_start_time"] = loop_start_time

        # Private FOCUS chats never stay silent.
        is_private_chat = self.context.chat_stream.group_info is None if self.context.chat_stream else False
        if self.context.loop_mode == ChatMode.FOCUS and is_private_chat and action_type == "no_reply":
            action_type = "reply"

        if action_type == "reply":
            await self._handle_reply_action(
                message_data, available_actions, gen_task, loop_start_time, cycle_timers, thinking_id, plan_result
            )
        else:
            await self._handle_other_actions(
                action_type, reasoning, action_data, is_parallel, gen_task, target_message or message_data,
                cycle_timers, thinking_id, plan_result, loop_start_time
            )

    async def _handle_reply_action(self, message_data, available_actions, gen_task, loop_start_time, cycle_timers, thinking_id, plan_result):
        """Generate (or collect) a reply and send it, then close the cycle."""
        # Fix: build reply_to_str unconditionally. The original only assigned it
        # on the no-gen_task paths, so a pre-started gen_task in NORMAL mode hit
        # a NameError when sending the reply.
        reply_to_str = await self._build_reply_to_str(message_data)
        if self.context.loop_mode == ChatMode.NORMAL:
            if not gen_task:
                gen_task = asyncio.create_task(
                    self.response_handler.generate_response(
                        message_data=message_data,
                        available_actions=available_actions,
                        reply_to=reply_to_str,
                        request_type="chat.replyer.normal",
                    )
                )
            try:
                response_set = await asyncio.wait_for(gen_task, timeout=global_config.chat.thinking_timeout)
            except asyncio.TimeoutError:
                response_set = None
        else:
            response_set = await self.response_handler.generate_response(
                message_data=message_data,
                available_actions=available_actions,
                reply_to=reply_to_str,
                request_type="chat.replyer.focus",
            )
        if response_set:
            loop_info, _, _ = await self.response_handler.generate_and_send_reply(
                response_set, reply_to_str, loop_start_time, message_data, cycle_timers, thinking_id, plan_result
            )
        else:
            # Fix: the original never ended the cycle on timeout/empty generation,
            # leaving current_cycle_detail open and the summary never logged.
            loop_info = self._build_final_loop_info(None, False, "", "", plan_result)
        self.cycle_tracker.end_cycle(loop_info, cycle_timers)

    async def _handle_other_actions(self, action_type, reasoning, action_data, is_parallel, gen_task, action_message, cycle_timers, thinking_id, plan_result, loop_start_time):
        """Execute a non-reply action, optionally with a parallel reply, then close the cycle."""
        background_reply_task = None
        if self.context.loop_mode == ChatMode.NORMAL and is_parallel and gen_task:
            background_reply_task = asyncio.create_task(self._handle_parallel_reply(gen_task, loop_start_time, action_message, cycle_timers, thinking_id, plan_result))
        background_action_task = asyncio.create_task(self._handle_action(action_type, reasoning, action_data, cycle_timers, thinking_id, action_message))
        reply_loop_info = None
        action_success, action_reply_text, action_command = False, "", ""
        if background_reply_task:
            reply_result, action_result_val = await asyncio.gather(
                background_reply_task, background_action_task, return_exceptions=True
            )
            if not isinstance(reply_result, BaseException) and reply_result is not None:
                reply_loop_info, _, _ = reply_result
            if not isinstance(action_result_val, BaseException) and action_result_val is not None:
                action_success, action_reply_text, action_command = action_result_val
        else:
            # Fix: no need to gather() a single task.
            try:
                action_result_val = await background_action_task
            except Exception as e:
                logger.error(f"{self.context.log_prefix} 动作任务执行失败: {e}")
                action_result_val = None
            if action_result_val is not None:
                action_success, action_reply_text, action_command = action_result_val
        loop_info = self._build_final_loop_info(reply_loop_info, action_success, action_reply_text, action_command, plan_result)
        self.cycle_tracker.end_cycle(loop_info, cycle_timers)

    async def _handle_parallel_reply(self, gen_task, loop_start_time, action_message, cycle_timers, thinking_id, plan_result):
        """Await the background reply generation and send it; None-ish tuple on timeout."""
        try:
            response_set = await asyncio.wait_for(gen_task, timeout=global_config.chat.thinking_timeout)
        except asyncio.TimeoutError:
            return None, "", {}
        if not response_set:
            return None, "", {}
        reply_to_str = await self._build_reply_to_str(action_message)
        return await self.response_handler.generate_and_send_reply(
            response_set, reply_to_str, loop_start_time, action_message, cycle_timers, thinking_id, plan_result
        )

    async def _handle_action(self, action, reasoning, action_data, cycle_timers, thinking_id, action_message) -> tuple[bool, str, str]:
        """Create and run one action handler; returns (success, reply_text, command)."""
        if not self.context.chat_stream:
            return False, "", ""
        try:
            action_handler = self.context.action_manager.create_action(
                action_name=action,
                action_data=action_data,
                reasoning=reasoning,
                cycle_timers=cycle_timers,
                thinking_id=thinking_id,
                chat_stream=self.context.chat_stream,
                log_prefix=self.context.log_prefix,
                action_message=action_message,
            )
            if not action_handler:
                return False, "", ""
            success, reply_text = await action_handler.handle_action()
            return success, reply_text, ""
        except Exception as e:
            logger.error(f"{self.context.log_prefix} 处理{action}时出错: {e}")
            traceback.print_exc()
            return False, "", ""

    def _get_direct_reply_plan(self, loop_start_time) -> Dict[str, Any]:
        """Synthesize a plan result that goes straight to a reply (planner skipped)."""
        return {
            "action_result": {
                "action_type": "reply",
                "action_data": {"loop_start_time": loop_start_time},
                "reasoning": "",
                "timestamp": time.time(),
                "is_parallel": False,
            },
            "action_prompt": "",
        }

    async def _build_reply_to_str(self, message_data: dict) -> str:
        """Build the '<person_name>:<text>' string the replyer quotes."""
        from src.person_info.person_info import get_person_info_manager
        person_info_manager = get_person_info_manager()
        # Prefer per-message platform info; fall back to the stream's platform.
        platform = message_data.get("chat_info_platform") or message_data.get("user_platform") or (self.context.chat_stream.platform if self.context.chat_stream else "default")
        user_id = message_data.get("user_id", "")
        person_id = person_info_manager.get_person_id(platform, user_id)
        person_name = await person_info_manager.get_value(person_id, "person_name")
        return f"{person_name}:{message_data.get('processed_plain_text')}"

    def _build_final_loop_info(self, reply_loop_info, action_success, action_reply_text, action_command, plan_result) -> Dict[str, Any]:
        """Merge action outcome into the reply loop info (or build a fresh one)."""
        if reply_loop_info:
            loop_info = reply_loop_info
            loop_info["loop_action_info"].update({
                "action_taken": action_success,
                "command": action_command,
                "taken_time": time.time(),
            })
        else:
            loop_info = {
                "loop_plan_info": {"action_result": plan_result.get("action_result", {})},
                "loop_action_info": {
                    "action_taken": action_success,
                    "reply_text": action_reply_text,
                    "command": action_command,
                    "taken_time": time.time(),
                },
            }
        return loop_info

View File

@@ -0,0 +1,45 @@
import time
from typing import Dict, Any, Tuple
from src.common.logger import get_logger
from src.chat.chat_loop.hfc_utils import CycleDetail
from .hfc_context import HfcContext
logger = get_logger("hfc.cycle")
class CycleTracker:
    """Records per-cycle bookkeeping: start/end times, timers and outcome."""

    def __init__(self, context: "HfcContext"):
        self.context = context

    def start_cycle(self) -> Tuple[Dict[str, float], str]:
        """Open a new cycle; returns (empty timer dict, new thinking id)."""
        self.context.cycle_counter += 1
        self.context.current_cycle_detail = CycleDetail(self.context.cycle_counter)
        self.context.current_cycle_detail.thinking_id = f"tid{str(round(time.time(), 2))}"
        cycle_timers: Dict[str, float] = {}
        return cycle_timers, self.context.current_cycle_detail.thinking_id

    def end_cycle(self, loop_info: Dict[str, Any], cycle_timers: Dict[str, float]):
        """Close the current cycle: store its outcome, append to history, log a summary."""
        if self.context.current_cycle_detail:
            self.context.current_cycle_detail.set_loop_info(loop_info)
            self.context.history_loop.append(self.context.current_cycle_detail)
            self.context.current_cycle_detail.timers = cycle_timers
            self.context.current_cycle_detail.end_time = time.time()
            self.print_cycle_info(cycle_timers)

    def print_cycle_info(self, cycle_timers: Dict[str, float]):
        """Log duration, chosen action, and per-step timings of the finished cycle."""
        detail = self.context.current_cycle_detail
        if not detail:
            return
        timer_strings = []
        for name, elapsed in cycle_timers.items():
            # Fix: the >=1s branch was missing its 秒 unit (the <1s branch says 毫秒).
            formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
            timer_strings.append(f"{name}: {formatted_time}")
        if detail.end_time and detail.start_time:
            duration = detail.end_time - detail.start_time
            # Fix: add the missing 第 so the summary reads "第N次思考"
            # (consistent with the cycle-start log in CycleProcessor).
            logger.info(
                f"{self.context.log_prefix}{detail.cycle_id}次思考,"
                f"耗时: {duration:.1f}秒, "
                f"选择动作: {detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
                + (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
            )

View File

@@ -0,0 +1,82 @@
import asyncio
import time
from typing import Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.base.component_types import ChatMode
from .hfc_context import HfcContext
logger = get_logger("hfc.energy")
class EnergyManager:
    """Background task that decays the chat's energy value over time and
    applies forced group-chat modes from configuration."""

    def __init__(self, context: "HfcContext"):
        self.context = context
        self._energy_task: Optional[asyncio.Task] = None
        # Throttle energy logs: at most one INFO line per interval, DEBUG otherwise.
        self.last_energy_log_time = 0
        self.energy_log_interval = 90

    async def start(self):
        """Launch the decay loop (idempotent; requires the chat loop to be running)."""
        if self.context.running and not self._energy_task:
            self._energy_task = asyncio.create_task(self._energy_loop())
            self._energy_task.add_done_callback(self._handle_energy_completion)
            logger.info(f"{self.context.log_prefix} 能量管理器已启动")

    async def stop(self):
        """Cancel the decay loop and wait for it to actually finish."""
        if self._energy_task and not self._energy_task.done():
            self._energy_task.cancel()
            # Fix: await the cancelled task instead of sleep(0) so shutdown is
            # deterministic and the task is fully torn down before we return.
            try:
                await self._energy_task
            except asyncio.CancelledError:
                pass
            logger.info(f"{self.context.log_prefix} 能量管理器已停止")

    def _handle_energy_completion(self, task: asyncio.Task):
        """Done-callback: surface any loop exception; cancellation is normal."""
        try:
            if exception := task.exception():
                logger.error(f"{self.context.log_prefix} 能量循环异常: {exception}")
            else:
                logger.info(f"{self.context.log_prefix} 能量循环正常结束")
        except asyncio.CancelledError:
            logger.info(f"{self.context.log_prefix} 能量循环被取消")

    async def _energy_loop(self):
        """Every 10s: honor a forced group mode, otherwise decay the energy value."""
        while self.context.running:
            await asyncio.sleep(10)
            if not self.context.chat_stream:
                continue
            is_group_chat = self.context.chat_stream.group_info is not None
            if is_group_chat and global_config.chat.group_chat_mode != "auto":
                # Forced mode: pin mode and energy, skip decay entirely.
                if global_config.chat.group_chat_mode == "focus":
                    self.context.loop_mode = ChatMode.FOCUS
                    self.context.energy_value = 35
                elif global_config.chat.group_chat_mode == "normal":
                    self.context.loop_mode = ChatMode.NORMAL
                    self.context.energy_value = 15
                continue
            # FOCUS mode drains twice as fast; energy is floored at 0.3.
            if self.context.loop_mode == ChatMode.NORMAL:
                decay = 0.3
            elif self.context.loop_mode == ChatMode.FOCUS:
                decay = 0.6
            else:
                decay = 0.0
            if decay:
                self.context.energy_value = max(self.context.energy_value - decay, 0.3)
            self._log_energy_change("能量值衰减")

    def _should_log_energy(self) -> bool:
        """True (and resets the throttle window) when an INFO-level log is due."""
        current_time = time.time()
        if current_time - self.last_energy_log_time >= self.energy_log_interval:
            self.last_energy_log_time = current_time
            return True
        return False

    def _log_energy_change(self, action: str, reason: str = ""):
        """Log the current energy value; INFO when the throttle allows, DEBUG otherwise."""
        # Fix: the original built the identical message four times across
        # duplicated branches; build once, pick the level.
        log_message = f"{self.context.log_prefix} {action}{reason},当前能量值:{self.context.energy_value:.1f}"
        if self._should_log_energy():
            logger.info(log_message)
        else:
            logger.debug(log_message)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,34 @@
from typing import List, Optional, Dict, Any
import time
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
from src.person_info.relationship_builder_manager import RelationshipBuilder
from src.chat.express.expression_learner import ExpressionLearner
from src.plugin_system.base.component_types import ChatMode
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.chat_loop.hfc_utils import CycleDetail
class HfcContext:
    """Shared mutable state for one chat's HFC loop.

    Bundles the resolved chat stream, mode/energy state, timing info and
    cycle history so the collaborating components (tracker, processors,
    managers) all operate on the same data.
    """

    def __init__(self, chat_id: str):
        # Resolve the chat stream up front -- the loop cannot run without one.
        self.stream_id: str = chat_id
        self.chat_stream: Optional[ChatStream] = get_chat_manager().get_stream(self.stream_id)
        if not self.chat_stream:
            raise ValueError(f"无法找到聊天流: {self.stream_id}")
        stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id
        self.log_prefix = f"[{stream_name}]"
        # Collaborators injected later by the owning chat loop.
        self.relationship_builder: Optional[RelationshipBuilder] = None
        self.expression_learner: Optional[ExpressionLearner] = None
        # Mode/energy drive NORMAL <-> FOCUS switching.
        self.loop_mode = ChatMode.NORMAL
        self.energy_value = 5.0
        # Timestamps consumed by proactive thinking and message reading.
        self.last_message_time = time.time()
        self.last_read_time = time.time() - 1
        self.action_manager = ActionManager()
        # Cycle bookkeeping.
        self.running: bool = False
        self.history_loop: List[CycleDetail] = []
        self.cycle_counter = 0
        self.current_cycle_detail: Optional[CycleDetail] = None

View File

@@ -0,0 +1,54 @@
import random
from typing import Dict, Any, TYPE_CHECKING
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.willing.willing_manager import get_willing_manager
from .hfc_context import HfcContext
if TYPE_CHECKING:
from .cycle_processor import CycleProcessor
logger = get_logger("hfc.normal_mode")
class NormalModeHandler:
    """Decides, in NORMAL chat mode, whether an incoming message deserves a
    reply, and hands accepted messages to the cycle processor."""

    def __init__(self, context: HfcContext, cycle_processor: "CycleProcessor"):
        self.context = context
        self.cycle_processor = cycle_processor
        self.willing_manager = get_willing_manager()

    async def handle_message(self, message_data: Dict[str, Any]) -> bool:
        """Return True when the message triggered a reply cycle, False otherwise."""
        stream = self.context.chat_stream
        if not stream:
            return False
        message_id = message_data.get("message_id", "")
        interest = message_data.get("interest_value") or 0.0
        self.willing_manager.setup(message_data, stream)
        probability = await self.willing_manager.get_reply_probability(message_id)
        if probability < 1:
            # Sub-certain probabilities get adjusted: optional gain from the
            # adapter, clamp to [0, 1], then scale by the talk frequency.
            extra_config = message_data.get("additional_config", {})
            if extra_config and "maimcore_reply_probability_gain" in extra_config:
                probability += extra_config["maimcore_reply_probability_gain"]
            probability = min(max(probability, 0), 1)
            probability = global_config.chat.get_current_talk_frequency(self.context.stream_id) * probability
        # Emoji / picture-id messages are never replied to directly.
        if message_data.get("is_emoji") or message_data.get("is_picid"):
            probability = 0
        source_name = stream.group_info.group_name if stream.group_info else "私聊"
        if probability > 0.05:
            logger.info(
                f"[{source_name}]"
                f"{message_data.get('user_nickname')}:"
                f"{message_data.get('processed_plain_text')}[兴趣:{interest:.2f}][回复概率:{probability * 100:.1f}%]"
            )
        if random.random() < probability:
            await self.willing_manager.before_generate_reply_handle(message_id)
            await self.cycle_processor.observe(message_data=message_data)
            return True
        self.willing_manager.delete(message_id)
        return False

View File

@@ -0,0 +1,180 @@
import asyncio
import time
import traceback
from typing import Optional, Dict, Any, TYPE_CHECKING
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.base.component_types import ChatMode
from .hfc_context import HfcContext
if TYPE_CHECKING:
from .cycle_processor import CycleProcessor
logger = get_logger("hfc.proactive")
class ProactiveThinker:
    """Background task that, in FOCUS mode, prompts the bot to consider
    speaking up after a configurable period of chat silence."""

    def __init__(self, context: "HfcContext", cycle_processor: "CycleProcessor"):
        self.context = context
        self.cycle_processor = cycle_processor
        self._proactive_thinking_task: Optional[asyncio.Task] = None
        # Built-in prompts keyed by chat type; {time} is the formatted silence span.
        self.proactive_thinking_prompts = {
            "private": """现在你和你朋友的私聊里面已经隔了{time}没有发送消息了,请你结合上下文以及你和你朋友之前聊过的话题和你的人设来决定要不要主动发送消息,你可以选择:
1. 继续保持沉默(当{time}以前已经结束了一个话题并且你不想挑起新话题时)
2. 选择回复(当{time}以前你发送了一条消息且没有人回复你时、你想主动挑起一个话题时)
请根据当前情况做出选择。如果选择回复,请直接发送你想说的内容;如果选择保持沉默,请只回复"沉默"(注意:这个词不会被发送到群聊中)。""",
            "group": """现在群里面已经隔了{time}没有人发送消息了,请你结合上下文以及群聊里面之前聊过的话题和你的人设来决定要不要主动发送消息,你可以选择:
1. 继续保持沉默(当{time}以前已经结束了一个话题并且你不想挑起新话题时)
2. 选择回复(当{time}以前你发送了一条消息且没有人回复你时、你想主动挑起一个话题时)
请根据当前情况做出选择。如果选择回复,请直接发送你想说的内容;如果选择保持沉默,请只回复"沉默"(注意:这个词不会被发送到群聊中)。""",
        }

    async def start(self):
        """Launch the thinking loop if enabled in config (idempotent)."""
        if self.context.running and not self._proactive_thinking_task and global_config.chat.enable_proactive_thinking:
            self._proactive_thinking_task = asyncio.create_task(self._proactive_thinking_loop())
            self._proactive_thinking_task.add_done_callback(self._handle_proactive_thinking_completion)
            logger.info(f"{self.context.log_prefix} 主动思考器已启动")

    async def stop(self):
        """Cancel the thinking loop."""
        if self._proactive_thinking_task and not self._proactive_thinking_task.done():
            self._proactive_thinking_task.cancel()
            await asyncio.sleep(0)
            logger.info(f"{self.context.log_prefix} 主动思考器已停止")

    def _handle_proactive_thinking_completion(self, task: asyncio.Task):
        """Done-callback: surface any loop exception; cancellation is normal."""
        try:
            if exception := task.exception():
                logger.error(f"{self.context.log_prefix} 主动思考循环异常: {exception}")
            else:
                logger.info(f"{self.context.log_prefix} 主动思考循环正常结束")
        except asyncio.CancelledError:
            logger.info(f"{self.context.log_prefix} 主动思考循环被取消")

    async def _proactive_thinking_loop(self):
        """Every 15s: if FOCUS mode and eligible, fire a proactive thought once
        the silence exceeds the (randomized) target interval."""
        while self.context.running:
            await asyncio.sleep(15)
            if self.context.loop_mode != ChatMode.FOCUS:
                continue
            if not self._should_enable_proactive_thinking():
                continue
            current_time = time.time()
            silence_duration = current_time - self.context.last_message_time
            target_interval = self._get_dynamic_thinking_interval()
            if silence_duration >= target_interval:
                try:
                    await self._execute_proactive_thinking(silence_duration)
                    # Reset the silence clock so we don't fire again immediately.
                    self.context.last_message_time = current_time
                except Exception as e:
                    logger.error(f"{self.context.log_prefix} 主动思考执行出错: {e}")
                    logger.error(traceback.format_exc())

    def _should_enable_proactive_thinking(self) -> bool:
        """Check the id allow-list and the group/private config switches."""
        if not self.context.chat_stream:
            return False
        try:
            # stream_id is assumed to end in a numeric chat id -- TODO confirm format.
            chat_id = int(self.context.stream_id.split(':')[-1])
        except (ValueError, IndexError):
            chat_id = None
        proactive_thinking_ids = getattr(global_config.chat, 'proactive_thinking_enable_ids', [])
        if proactive_thinking_ids and (chat_id is None or chat_id not in proactive_thinking_ids):
            return False
        is_group_chat = self.context.chat_stream.group_info is not None
        if is_group_chat and not global_config.chat.proactive_thinking_in_group:
            return False
        if not is_group_chat and not global_config.chat.proactive_thinking_in_private:
            return False
        return True

    def _get_dynamic_thinking_interval(self) -> float:
        """Target silence before thinking: normally-distributed around the
        configured interval, clamped to [1s, 24h]; falls back to a fixed value."""
        try:
            from src.utils.timing_utils import get_normal_distributed_interval
            base_interval = global_config.chat.proactive_thinking_interval
            delta_sigma = getattr(global_config.chat, 'delta_sigma', 120)
            # Negative config values are treated as their magnitude.
            if base_interval < 0:
                base_interval = abs(base_interval)
            if delta_sigma < 0:
                delta_sigma = abs(delta_sigma)
            if base_interval == 0 and delta_sigma == 0:
                return 300
            elif base_interval == 0:
                sigma_percentage = delta_sigma / 1000
                return get_normal_distributed_interval(0, sigma_percentage, 1, 86400, use_3sigma_rule=True)
            elif delta_sigma == 0:
                return base_interval
            sigma_percentage = delta_sigma / base_interval
            return get_normal_distributed_interval(base_interval, sigma_percentage, 1, 86400, use_3sigma_rule=True)
        except ImportError:
            # Fix: log message was missing its comma.
            logger.warning(f"{self.context.log_prefix} timing_utils不可用,使用固定间隔")
            return max(300, abs(global_config.chat.proactive_thinking_interval))
        except Exception as e:
            logger.error(f"{self.context.log_prefix} 动态间隔计算出错: {e},使用固定间隔")
            return max(300, abs(global_config.chat.proactive_thinking_interval))

    def _format_duration(self, seconds: float) -> str:
        """Render a duration like ``1小时2分钟5秒``; zero duration yields ``0秒``."""
        hours = int(seconds // 3600)
        minutes = int((seconds % 3600) // 60)
        secs = int(seconds % 60)
        parts = []
        if hours > 0:
            parts.append(f"{hours}小时")
        # Fix: the minute/second units were missing, producing strings like
        # "1小时25" that were then interpolated into the LLM prompt.
        if minutes > 0:
            parts.append(f"{minutes}分钟")
        if secs > 0 or not parts:
            parts.append(f"{secs}秒")
        return "".join(parts)

    async def _execute_proactive_thinking(self, silence_duration: float):
        """Build a synthetic 'system' message with the proactive prompt and run a cycle on it."""
        formatted_time = self._format_duration(silence_duration)
        logger.info(f"{self.context.log_prefix} 触发主动思考,已沉默{formatted_time}")
        try:
            proactive_prompt = self._get_proactive_prompt(formatted_time)
            thinking_message = {
                "processed_plain_text": proactive_prompt,
                "user_id": "system_proactive_thinking",
                "user_platform": "system",
                "timestamp": time.time(),
                "message_type": "proactive_thinking",
                "user_nickname": "系统主动思考",
                "chat_info_platform": "system",
                "message_id": f"proactive_{int(time.time())}",
            }
            logger.info(f"{self.context.log_prefix} 开始主动思考...")
            await self.cycle_processor.observe(message_data=thinking_message)
            logger.info(f"{self.context.log_prefix} 主动思考完成")
        except Exception as e:
            logger.error(f"{self.context.log_prefix} 主动思考执行异常: {e}")
            logger.error(traceback.format_exc())

    def _get_proactive_prompt(self, formatted_time: str) -> str:
        """Pick the prompt: a configured template wins, else the built-in one for the chat type."""
        if hasattr(global_config.chat, 'proactive_thinking_prompt_template') and global_config.chat.proactive_thinking_prompt_template.strip():
            return global_config.chat.proactive_thinking_prompt_template.format(time=formatted_time)
        chat_type = "group" if self.context.chat_stream and self.context.chat_stream.group_info else "private"
        prompt_template = self.proactive_thinking_prompts.get(chat_type, self.proactive_thinking_prompts["group"])
        return prompt_template.format(time=formatted_time)

View File

@@ -0,0 +1,152 @@
import time
import random
import traceback
from typing import Optional, Dict, Any, List, Tuple
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.apis import generator_api, send_api, message_api, database_api
from src.person_info.person_info import get_person_info_manager
from .hfc_context import HfcContext
logger = get_logger("hfc.response")
class ResponseHandler:
    """Generates reply content via the generator API, sends it segment by
    segment, and records the reply as an action in the database."""

    def __init__(self, context: "HfcContext"):
        self.context = context

    async def generate_and_send_reply(
        self,
        response_set,
        reply_to_str,
        loop_start_time,
        action_message,
        cycle_timers: Dict[str, float],
        thinking_id,
        plan_result,
    ) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
        """Send the generated segments, store the reply action, and build the loop info.

        Returns (loop_info, sent reply text, cycle_timers).
        """
        reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, action_message)
        person_info_manager = get_person_info_manager()
        # Prefer per-message platform info; fall back to the stream's platform.
        platform = "default"
        if self.context.chat_stream:
            platform = (
                action_message.get("chat_info_platform") or action_message.get("user_platform") or self.context.chat_stream.platform
            )
        user_id = action_message.get("user_id", "")
        person_id = person_info_manager.get_person_id(platform, user_id)
        person_name = await person_info_manager.get_value(person_id, "person_name")
        action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
        await database_api.store_action_info(
            chat_stream=self.context.chat_stream,
            action_build_into_prompt=False,
            action_prompt_display=action_prompt_display,
            action_done=True,
            thinking_id=thinking_id,
            action_data={"reply_text": reply_text, "reply_to": reply_to_str},
            action_name="reply",
        )
        loop_info: Dict[str, Any] = {
            "loop_plan_info": {
                "action_result": plan_result.get("action_result", {}),
            },
            "loop_action_info": {
                "action_taken": True,
                "reply_text": reply_text,
                "command": "",
                "taken_time": time.time(),
            },
        }
        return loop_info, reply_text, cycle_timers

    async def _send_response(self, reply_set, reply_to, thinking_start_time, message_data) -> str:
        """Send each reply segment to the stream; returns the concatenated sent text."""
        current_time = time.time()
        new_message_count = message_api.count_new_messages(
            chat_id=self.context.stream_id, start_time=thinking_start_time, end_time=current_time
        )
        platform = message_data.get("user_platform", "")
        user_id = message_data.get("user_id", "")
        reply_to_platform_id = f"{platform}:{user_id}"
        # Quote the original message only when enough new messages arrived while thinking.
        need_reply = new_message_count >= random.randint(2, 4)
        reply_text = ""
        is_proactive_thinking = message_data.get("message_type") == "proactive_thinking"
        first_replied = False
        for reply_seg in reply_set:
            # Segments arrive as (type, content) tuples; bare values are treated as text.
            if isinstance(reply_seg, tuple) and len(reply_seg) >= 2:
                _reply_type, data = reply_seg
            else:
                data = str(reply_seg)
            if is_proactive_thinking and data.strip() == "沉默":
                # Fix: check the silence sentinel BEFORE accumulating reply_text --
                # the original recorded the never-sent "沉默" in the stored action.
                logger.info(f"{self.context.log_prefix} 主动思考决定保持沉默,不发送消息")
                continue
            reply_text += data
            if not first_replied:
                # Fix: the original duplicated the whole send call just to toggle
                # the reply_to kwarg; build the kwargs once instead.
                send_kwargs: Dict[str, Any] = dict(
                    text=data,
                    stream_id=self.context.stream_id,
                    reply_to_platform_id=reply_to_platform_id,
                    typing=False,
                )
                if need_reply:
                    send_kwargs["reply_to"] = reply_to
                await send_api.text_to_stream(**send_kwargs)
                first_replied = True
            else:
                # Follow-up segments are sent with a typing indicator and no quote.
                await send_api.text_to_stream(
                    text=data,
                    stream_id=self.context.stream_id,
                    reply_to_platform_id=reply_to_platform_id,
                    typing=True,
                )
        return reply_text

    async def generate_response(
        self,
        message_data: dict,
        available_actions: Optional[Dict[str, Any]],
        reply_to: str,
        request_type: str = "chat.replyer.normal",
    ) -> Optional[list]:
        """Ask the generator API for reply segments; None on failure (never raises)."""
        try:
            success, reply_set, _ = await generator_api.generate_reply(
                chat_stream=self.context.chat_stream,
                reply_to=reply_to,
                available_actions=available_actions,
                enable_tool=global_config.tool.enable_tool,
                request_type=request_type,
                from_plugin=False,
            )
            if not success or not reply_set:
                logger.info(f"{message_data.get('processed_plain_text')} 的回复生成失败")
                return None
            return reply_set
        except Exception as e:
            logger.error(f"{self.context.log_prefix}回复生成出现错误:{str(e)} {traceback.format_exc()}")
            return None

View File

@@ -322,7 +322,7 @@ class ScheduleManager:
now = datetime.now().time()
# 修复:应该获取列表的第一个元素
first_item = self.today_schedule[0]
last_item = self.today_schedule[-1]
for item in [first_item, last_item]: