commit 5963214d95
parent 7d19a6728f
Author: SengokuCola
Date:   2025-04-30 17:50:47 +08:00

11 changed files with 108 additions and 94 deletions

View File

@@ -170,18 +170,16 @@ class BotConfig:
SCHEDULE_TEMPERATURE: float = 0.5 # 日程表温度建议0.5-1.0
TIME_ZONE: str = "Asia/Shanghai" # 时区
# chat
allow_focus_mode: bool = True # 是否允许专注聊天状态
base_normal_chat_num: int = 3 # 最多允许多少个群进行普通聊天
base_focused_chat_num: int = 2 # 最多允许多少个群进行专注聊天
observation_context_size: int = 12 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
message_buffer: bool = True # 消息缓冲器
ban_words = set()
ban_msgs_regex = set()
@@ -190,11 +188,9 @@ class BotConfig:
default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
consecutive_no_reply_threshold = 3
compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
# normal_chat
model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
@@ -434,7 +430,6 @@ class BotConfig:
config.emoji_chance = normal_chat_config.get("emoji_chance", config.emoji_chance)
config.thinking_timeout = normal_chat_config.get("thinking_timeout", config.thinking_timeout)
config.willing_mode = normal_chat_config.get("willing_mode", config.willing_mode)
config.response_willing_amplifier = normal_chat_config.get(
"response_willing_amplifier", config.response_willing_amplifier
@@ -457,9 +452,7 @@ class BotConfig:
def focus_chat(parent: dict):
focus_chat_config = parent["focus_chat"]
config.compressed_length = focus_chat_config.get("compressed_length", config.compressed_length)
-config.compress_length_limit = focus_chat_config.get(
-"compress_length_limit", config.compress_length_limit
-)
+config.compress_length_limit = focus_chat_config.get("compress_length_limit", config.compress_length_limit)
config.reply_trigger_threshold = focus_chat_config.get(
"reply_trigger_threshold", config.reply_trigger_threshold
)
@@ -470,7 +463,6 @@ class BotConfig:
"consecutive_no_reply_threshold", config.consecutive_no_reply_threshold
)
def model(parent: dict):
# 加载模型配置
model_config: dict = parent["model"]
@@ -634,7 +626,9 @@ class BotConfig:
"enable_kaomoji_protection", config.enable_kaomoji_protection
)
if config.INNER_VERSION in SpecifierSet(">=1.6.0"):
-config.model_max_output_length = response_splitter_config.get("model_max_output_length", config.model_max_output_length)
+config.model_max_output_length = response_splitter_config.get(
+"model_max_output_length", config.model_max_output_length
+)
def groups(parent: dict):
groups_config = parent["groups"]
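The BotConfig hunks above only re-wrap calls for line length; the loading pattern itself is unchanged: each field is read with `section.get("key", config.field)`, so keys missing from the parsed config silently keep their current values. A minimal sketch of that pattern, with illustrative field names rather than the project's full schema:

```python
# Minimal sketch of the config-loading pattern above (illustrative fields only).
from dataclasses import dataclass


@dataclass
class FocusChatSketch:
    compressed_length: int = 5
    compress_length_limit: int = 5


def load_focus_chat(config: FocusChatSketch, parent: dict) -> None:
    focus_chat_config = parent.get("focus_chat", {})
    # Absent keys fall back to the current values, i.e. the dataclass defaults.
    config.compressed_length = focus_chat_config.get("compressed_length", config.compressed_length)
    config.compress_length_limit = focus_chat_config.get("compress_length_limit", config.compress_length_limit)


cfg = FocusChatSketch()
load_focus_chat(cfg, {"focus_chat": {"compressed_length": 8}})
print(cfg)  # compressed_length=8, compress_length_limit keeps its default of 5
```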

View File

@@ -24,6 +24,7 @@ CLEANUP_INTERVAL_SECONDS = 1200
STATE_UPDATE_INTERVAL_SECONDS = 60
LOG_INTERVAL_SECONDS = 3
class BackgroundTaskManager:
"""管理 Heartflow 的后台周期性任务。"""
@@ -32,14 +33,13 @@ class BackgroundTaskManager:
mai_state_info: MaiStateInfo, # Needs current state info
mai_state_manager: MaiStateManager,
subheartflow_manager: SubHeartflowManager,
-interest_logger: InterestLogger
+interest_logger: InterestLogger,
):
self.mai_state_info = mai_state_info
self.mai_state_manager = mai_state_manager
self.subheartflow_manager = subheartflow_manager
self.interest_logger = interest_logger
# Task references
self._state_update_task: Optional[asyncio.Task] = None
self._cleanup_task: Optional[asyncio.Task] = None
@@ -100,7 +100,7 @@ class BackgroundTaskManager:
]
# 统一启动所有任务
-for task_func,log_level, log_msg, task_attr_name in task_configs:
+for task_func, log_level, log_msg, task_attr_name in task_configs:
# 检查任务变量是否存在且未完成
current_task_var = getattr(self, task_attr_name)
if current_task_var is None or current_task_var.done():
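The loop above unpacks `(task_func, log_level, log_msg, task_attr_name)` tuples and only spawns a task when the attribute named by `task_attr_name` is `None` or already finished, which makes repeated start calls idempotent. A rough sketch of that pattern, assuming the manager stores `asyncio.Task` references on `self` (attribute and coroutine names here are illustrative, not the project's actual ones):

```python
# Sketch of the "start only if missing or done" task pattern above (names illustrative).
import asyncio
from typing import Optional


class TaskManagerSketch:
    def __init__(self) -> None:
        self._cleanup_task: Optional[asyncio.Task] = None

    async def _cleanup_loop(self) -> None:
        while True:
            await asyncio.sleep(1200)  # e.g. CLEANUP_INTERVAL_SECONDS from the diff

    def start_tasks(self) -> None:
        # Must be called from inside a running event loop (asyncio.create_task requires one).
        task_configs = [(self._cleanup_loop, "debug", "cleanup task started", "_cleanup_task")]
        for task_func, _log_level, _log_msg, task_attr_name in task_configs:
            current_task_var = getattr(self, task_attr_name)
            if current_task_var is None or current_task_var.done():
                setattr(self, task_attr_name, asyncio.create_task(task_func()))
```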

View File

@@ -14,6 +14,7 @@ from src.heart_flow.background_tasks import BackgroundTaskManager # Import Back
logger = get_logger("heartflow")
class Heartflow:
"""主心流协调器,负责初始化并协调各个子系统:
- 状态管理 (MaiState)

View File

@@ -5,6 +5,7 @@ from typing import List, Tuple, Optional
from src.common.logger_manager import get_logger
from src.plugins.moods.moods import MoodManager
from src.config.config import global_config
logger = get_logger("mai_state")
@@ -19,13 +20,12 @@ base_normal_chat_num = global_config.base_normal_chat_num
base_focused_chat_num = global_config.base_focused_chat_num
-MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num/2)
+MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2)
MAX_NORMAL_CHAT_NUM_NORMAL = base_normal_chat_num
MAX_NORMAL_CHAT_NUM_FOCUSED = base_normal_chat_num + 1
# 不同状态下专注聊天的最大消息数
-MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num/2)
+MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num / 2)
MAX_FOCUSED_CHAT_NUM_NORMAL = base_focused_chat_num
MAX_FOCUSED_CHAT_NUM_FOCUSED = base_focused_chat_num + 2
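These constants derive per-state chat limits from the two config values shown in the BotConfig hunk earlier in this commit. With the defaults there (`base_normal_chat_num = 3`, `base_focused_chat_num = 2`), the derived limits work out as below:

```python
# Worked example with the defaults from the BotConfig hunk above
# (PEEKING / NORMAL / FOCUSED limits, in that order).
base_normal_chat_num, base_focused_chat_num = 3, 2

print(int(base_normal_chat_num / 2), base_normal_chat_num, base_normal_chat_num + 1)     # 1 3 4
print(int(base_focused_chat_num / 2), base_focused_chat_num, base_focused_chat_num + 2)  # 1 2 4
```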

View File

@@ -5,7 +5,6 @@ import time
from typing import Optional, List, Dict, Tuple, Callable, Coroutine
import traceback
from src.common.logger_manager import get_logger
-import random
from src.plugins.chat.message import MessageRecv
from src.plugins.chat.chat_stream import chat_manager
import math
@@ -153,8 +152,6 @@ class InterestChatting:
"above_threshold": self.above_threshold,
}
# --- 新增后台更新任务相关方法 ---
async def _run_update_loop(self, update_interval: float = 1.0):
"""后台循环,定期更新兴趣和回复概率。"""

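`_run_update_loop` is documented as a background loop that refreshes interest and reply probability every `update_interval` seconds. A hedged sketch of what such a loop typically looks like; the `_update_once` helper, the decay factor, and the cancellation handling are assumptions, not the project's actual code:

```python
# Sketch of a periodic background update loop like _run_update_loop above (details assumed).
import asyncio


class InterestSketch:
    def __init__(self) -> None:
        self.interest = 1.0

    def _update_once(self) -> None:
        # Hypothetical per-tick decay, in the spirit of default_decay_rate_per_second = 0.98.
        self.interest *= 0.98

    async def _run_update_loop(self, update_interval: float = 1.0) -> None:
        try:
            while True:
                self._update_once()
                await asyncio.sleep(update_interval)
        except asyncio.CancelledError:
            pass  # let the owning task cancel the loop cleanly
```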
View File

@@ -29,7 +29,8 @@ logger = get_logger("subheartflow_manager")
# 子心流管理相关常量
INACTIVE_THRESHOLD_SECONDS = 3600 # 子心流不活跃超时时间(秒)
NORMAL_CHAT_TIMEOUT_SECONDS = 30 * 60 # 30分钟
class SubHeartflowManager:
"""管理所有活跃的 SubHeartflow 实例。"""
@@ -341,13 +342,12 @@ class SubHeartflowManager:
async with self._lock:
# 1. 筛选出所有 ABSENT 状态的子心流
absent_subflows = [
-hf for hf in self.subheartflows.values()
-if hf.chat_state.chat_status == ChatState.ABSENT
+hf for hf in self.subheartflows.values() if hf.chat_state.chat_status == ChatState.ABSENT
]
if not absent_subflows:
logger.debug("没有摸鱼的子心流可以评估。") # 日志太频繁,注释掉
return # 没有目标,直接返回
# 2. 随机选一个幸运儿
sub_hf_to_evaluate = random.choice(absent_subflows)
@@ -358,8 +358,10 @@ class SubHeartflowManager:
# 3. 检查 CHAT 上限
current_chat_count = self.count_subflows_by_state_nolock(ChatState.CHAT)
if current_chat_count >= chat_limit:
-logger.debug(f"{log_prefix} 想看看能不能聊,但是聊天太多了, ({current_chat_count}/{chat_limit}) 满了。")
+logger.debug(
+f"{log_prefix} 想看看能不能聊,但是聊天太多了, ({current_chat_count}/{chat_limit}) 满了。"
+)
return # 满了,这次就算了
# --- 获取 FOCUSED 计数 ---
current_focused_count = self.count_subflows_by_state_nolock(ChatState.FOCUSED)
@@ -369,7 +371,7 @@ class SubHeartflowManager:
chatting_group_names = []
focused_group_names = []
for flow_id, hf in self.subheartflows.items():
stream_name = chat_manager.get_stream_name(flow_id) or str(flow_id) # 保证有名字
if hf.chat_state.chat_status == ChatState.CHAT:
chatting_group_names.append(stream_name)
elif hf.chat_state.chat_status == ChatState.FOCUSED:
@@ -384,30 +386,34 @@ class SubHeartflowManager:
mai_state_description = f"你当前状态: {current_mai_state.value}"
individuality = Individuality.get_instance()
-personality_prompt = individuality.get_prompt(x_person=2, level = 2)
+personality_prompt = individuality.get_prompt(x_person=2, level=2)
prompt_personality = f"你正在扮演名为{individuality.name}的人类,{personality_prompt}"
# --- 修改:在 prompt 中加入当前聊天计数和群名信息 (条件显示) ---
chat_status_lines = []
if chatting_group_names:
-chat_status_lines.append(f"正在闲聊 ({current_chat_count}/{chat_limit}): {', '.join(chatting_group_names)}")
+chat_status_lines.append(
+f"正在闲聊 ({current_chat_count}/{chat_limit}): {', '.join(chatting_group_names)}"
+)
if focused_group_names:
-chat_status_lines.append(f"正在专注 ({current_focused_count}/{focused_limit}): {', '.join(focused_group_names)}")
+chat_status_lines.append(
+f"正在专注 ({current_focused_count}/{focused_limit}): {', '.join(focused_group_names)}"
+)
chat_status_prompt = "当前没有在任何群聊中。" # 默认消息喵~
if chat_status_lines:
chat_status_prompt = "当前聊天情况:\n" + "\n".join(chat_status_lines) # 拼接状态信息
prompt = (
f"{prompt_personality}\\n"
f"你当前没在 [{stream_name}] 群聊天。\\n"
f"{mai_state_description}\\n"
f"{chat_status_prompt}\\n" # <-- 喵!用了新的状态信息~
f"{_observation_summary}\\n---\\n"
f"基于以上信息,你想不想开始在这个群闲聊?\\n"
f"请说明理由,并以 JSON 格式回答,包含 'decision' (布尔值) 和 'reason' (字符串)。\\n"
-f'例如:{{\"decision\": true, \"reason\": \"看起来挺热闹的,插个话\"}}\\n'
+f'例如:{{"decision": true, "reason": "看起来挺热闹的,插个话"}}\\n'
-f'例如:{{\"decision\": false, \"reason\": \"已经聊了好多,休息一下\"}}\\n'
+f'例如:{{"decision": false, "reason": "已经聊了好多,休息一下"}}\\n'
f"请只输出有效的 JSON 对象。"
)
# --- 结束修改 ---
@@ -417,11 +423,11 @@ class SubHeartflowManager:
if yao_kai_shi_liao_ma is None:
logger.debug(f"{log_prefix} 问AI想不想聊失败了这次算了。")
return # 评估失败,结束
if not yao_kai_shi_liao_ma:
logger.info(f"{log_prefix} 现在不想聊这个群。")
return # 不想聊,结束
# --- 5. AI想聊再次检查额度并尝试转换 ---
# 再次检查以防万一
@@ -435,7 +441,9 @@ class SubHeartflowManager:
if sub_hf_to_evaluate.chat_state.chat_status == ChatState.CHAT:
logger.debug(f"{log_prefix} 成功进入聊天状态!本次评估圆满结束。")
else:
-logger.warning(f"{log_prefix} 奇怪,尝试进入聊天状态失败了。当前状态: {sub_hf_to_evaluate.chat_state.chat_status.value}")
+logger.warning(
+f"{log_prefix} 奇怪,尝试进入聊天状态失败了。当前状态: {sub_hf_to_evaluate.chat_state.chat_status.value}"
+)
else:
logger.warning(
f"{log_prefix} AI说想聊但是刚问完就没空位了 ({current_chat_count_before_change}/{chat_limit})。真不巧,下次再说吧。"
@@ -483,15 +491,19 @@ class SubHeartflowManager:
if time_since_last_bb > NORMAL_CHAT_TIMEOUT_SECONDS:
should_deactivate = True
reason = f"超过 {NORMAL_CHAT_TIMEOUT_SECONDS / 60:.0f} 分钟没 BB"
-logger.info(f"{log_prefix} 检测到超时 ({reason}),准备转为 ABSENT。上次活动时间: {last_bot_dong_zuo_time:.0f}")
+logger.info(
+f"{log_prefix} 检测到超时 ({reason}),准备转为 ABSENT。上次活动时间: {last_bot_dong_zuo_time:.0f}"
+)
# else:
# logger.debug(f"{log_prefix} Bot活动时间未超时 ({time_since_last_bb:.0f}s < {NORMAL_CHAT_TIMEOUT_SECONDS}s),保持 CHAT 状态。")
# else:
# 如果没有记录到Bot的活动时间暂时不因为超时而转换状态
# logger.debug(f"{log_prefix} 未找到有效的 Bot 最后活动时间记录,不执行超时检查。")
except AttributeError:
-logger.error(f"{log_prefix} 无法获取 Bot 最后 BB 时间,请确保 SubHeartflow 相关实现正确。跳过超时检查。")
+logger.error(
+f"{log_prefix} 无法获取 Bot 最后 BB 时间,请确保 SubHeartflow 相关实现正确。跳过超时检查。"
+)
except Exception as e:
logger.error(f"{log_prefix} 检查 Bot 超时状态时出错: {e}", exc_info=True)
@@ -507,9 +519,12 @@ class SubHeartflowManager:
logger.warning(f"{log_prefix} 尝试因超时转换为 ABSENT 失败。")
if transitioned_to_absent > 0:
-logger.info(f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,{transitioned_to_absent} 个因超时转为 ABSENT。")
+logger.info(
+f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,{transitioned_to_absent} 个因超时转为 ABSENT。"
+)
# else:
# logger.debug(f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,无超时转换。")
# --- 结束新增 ---
async def _llm_evaluate_state_transition(self, prompt: str) -> Optional[bool]:
@@ -653,7 +668,7 @@ class SubHeartflowManager:
# 仅当子心流处于 FOCUSED 状态时才进行转换
# 因为 HeartFChatting 只在 FOCUSED 状态下运行
if current_state == ChatState.FOCUSED:
target_state = ChatState.ABSENT # 默认目标状态
log_reason = "默认转换"
# 决定是去 ABSENT 还是 CHAT
@@ -671,14 +686,20 @@ class SubHeartflowManager:
if current_chat_count < chat_limit:
target_state = ChatState.CHAT
log_reason = f"随机选择 CHAT (当前 {current_chat_count}/{chat_limit})"
-logger.debug(f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT未达上限 ({current_chat_count}/{chat_limit})")
+logger.debug(
+f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT未达上限 ({current_chat_count}/{chat_limit})"
+)
else:
target_state = ChatState.ABSENT
log_reason = f"随机选择 CHAT 但已达上限 ({current_chat_count}/{chat_limit}),转为 ABSENT"
-logger.debug(f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT但已达上限 ({current_chat_count}/{chat_limit}),改为进入 ABSENT")
+logger.debug(
+f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT但已达上限 ({current_chat_count}/{chat_limit}),改为进入 ABSENT"
+)
-#开始转换
+# 开始转换
-logger.info(f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})")
+logger.info(
+f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})"
+)
try:
await subflow.change_chat_state(target_state)
# 检查最终状态
@@ -686,9 +707,13 @@ class SubHeartflowManager:
if final_state == target_state:
logger.debug(f"[状态转换请求] {stream_name} 状态已成功转换为 {final_state.value}")
else:
-logger.warning(f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}")
+logger.warning(
+f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}"
+)
except Exception as e:
-logger.error(f"[状态转换请求] 转换 {stream_name}{target_state.value} 时出错: {e}", exc_info=True)
+logger.error(
+f"[状态转换请求] 转换 {stream_name}{target_state.value} 时出错: {e}", exc_info=True
+)
elif current_state == ChatState.ABSENT:
logger.debug(f"[状态转换请求] {stream_name} 已处于 ABSENT 状态,无需转换")
else:
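The prompt built earlier in this file asks the model to reply with a JSON object containing a boolean `decision` and a string `reason`, and `_llm_evaluate_state_transition` returns `Optional[bool]`, with `None` signalling a failed evaluation. A hedged sketch of parsing such a reply; the extraction and error handling here are assumptions, not the project's actual implementation:

```python
# Sketch of parsing the {"decision": ..., "reason": ...} reply requested by the prompt above.
import json
from typing import Optional


def parse_decision(llm_text: str) -> Optional[bool]:
    try:
        # Tolerate extra prose around the JSON object by slicing the outermost braces.
        start, end = llm_text.find("{"), llm_text.rfind("}")
        data = json.loads(llm_text[start : end + 1])
        decision = data.get("decision")
        return decision if isinstance(decision, bool) else None
    except (ValueError, AttributeError):
        return None  # maps to the "评估失败,结束" branch above


print(parse_decision('{"decision": true, "reason": "看起来挺热闹的,插个话"}'))  # True
```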

View File

@@ -257,11 +257,11 @@ class ImageManager:
frame = gif.convert("RGB")
all_frames.append(frame.copy())
except EOFError:
pass # 读完啦
if not all_frames:
logger.warning("GIF中没有找到任何帧")
return None # 空的GIF直接返回None
# --- 新的帧选择逻辑 ---
selected_frames = []
@@ -295,8 +295,8 @@ class ImageManager:
# 如果选择后连一帧都没有比如GIF只有一帧且后续处理失败或者原始GIF就没帧也返回None
if not selected_frames:
logger.warning("处理后没有选中任何帧")
return None
# logger.debug(f"总帧数: {len(all_frames)}, 选中帧数: {len(selected_frames)}")
@@ -307,14 +307,13 @@ class ImageManager:
target_height = 200 # 固定高度
# 防止除以零
if frame_height == 0:
logger.error("帧高度为0无法计算缩放尺寸")
return None
target_width = int((target_height / frame_height) * frame_width)
# 宽度也不能是0
if target_width == 0:
logger.warning(f"计算出的目标宽度为0 (原始尺寸 {frame_width}x{frame_height})调整为1")
target_width = 1
# 调整所有选中帧的大小
resized_frames = [
@@ -325,13 +324,12 @@ class ImageManager:
total_width = target_width * len(resized_frames)
# 防止总宽度为0
if total_width == 0 and len(resized_frames) > 0:
logger.warning("计算出的总宽度为0但有选中帧可能目标宽度太小")
# 至少给点宽度吧
total_width = len(resized_frames)
elif total_width == 0:
logger.error("计算出的总宽度为0且无选中帧")
return None
combined_image = Image.new("RGB", (total_width, target_height))
@@ -341,17 +339,17 @@ class ImageManager:
# 转换为base64
buffer = io.BytesIO()
combined_image.save(buffer, format="JPEG", quality=85) # 保存为JPEG
result_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
return result_base64
except MemoryError:
logger.error("GIF转换失败: 内存不足可能是GIF太大或帧数太多")
return None # 内存不够啦
except Exception as e:
logger.error(f"GIF转换失败: {str(e)}", exc_info=True) # 记录详细错误信息
return None # 其他错误也返回None
# 创建全局单例
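The ImageManager hunks in this commit only adjust spacing and blank lines, but the surrounding code shows a clear pipeline: read GIF frames, pick a subset, scale each to a fixed 200-pixel height while keeping the aspect ratio (clamping the width to at least 1), paste the frames into one horizontal strip, and return it as base64-encoded JPEG. A condensed sketch of that pipeline; the frame-selection rule here is a simplification, not the project's actual logic:

```python
# Condensed sketch of the GIF -> horizontal strip -> base64 JPEG flow above (selection simplified).
import base64
import io
from typing import List, Optional

from PIL import Image


def frames_to_base64_strip(frames: List[Image.Image], target_height: int = 200) -> Optional[str]:
    if not frames:
        return None  # empty GIF, mirrors the "没有找到任何帧" branch
    selected = frames[:: max(1, len(frames) // 6)]  # simplified: roughly every Nth frame
    frame_width, frame_height = selected[0].size
    if frame_height == 0:
        return None  # avoid division by zero
    target_width = max(1, int((target_height / frame_height) * frame_width))
    resized = [f.convert("RGB").resize((target_width, target_height)) for f in selected]
    combined = Image.new("RGB", (target_width * len(resized), target_height))
    for i, frame in enumerate(resized):
        combined.paste(frame, (i * target_width, 0))
    buffer = io.BytesIO()
    combined.save(buffer, format="JPEG", quality=85)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
```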

View File

@@ -32,7 +32,7 @@ from src.individuality.individuality import Individuality
WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒
EMOJI_SEND_PRO = 0.3 # 设置一个概率,比如 30% 才真的发
CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值
@@ -982,13 +982,14 @@ class HeartFChatting:
# --- 新增:概率性忽略文本回复附带的表情(正确的位置)---
if action == "text_reply" and emoji_query:
logger.debug(f"{self.log_prefix}[Planner] 大模型想让麦麦发文字时带表情: '{emoji_query}'")
# 掷骰子看看要不要听它的
if random.random() > EMOJI_SEND_PRO:
-logger.info(f"{self.log_prefix}[Planner] 但是麦麦这次不想加表情 ({1-EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'")
+logger.info(
+f"{self.log_prefix}[Planner] 但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'"
+)
emoji_query = "" # 把表情请求清空,就不发了
else:
logger.info(f"{self.log_prefix}[Planner] 好吧,加上表情 '{emoji_query}'")
# --- 结束:概率性忽略 ---
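With `EMOJI_SEND_PRO = 0.3`, a planner-requested emoji survives the dice roll only about 30% of the time; otherwise `emoji_query` is cleared and no emoji is sent. A tiny sketch of that gate (the helper function is illustrative, not part of the project):

```python
# Tiny sketch of the probabilistic emoji gate above (helper name is illustrative).
import random

EMOJI_SEND_PRO = 0.3  # roughly 30% of requested emojis actually get sent


def maybe_keep_emoji(emoji_query: str) -> str:
    if emoji_query and random.random() > EMOJI_SEND_PRO:
        return ""  # drop the request, mirroring emoji_query = "" in the diff
    return emoji_query
```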

View File

@@ -44,7 +44,5 @@ class MemoryConfig:
consolidate_memory_percentage=getattr(global_config, "consolidate_memory_percentage", 0.01),
consolidate_memory_interval=getattr(global_config, "consolidate_memory_interval", 1000),
llm_topic_judge=getattr(global_config, "llm_topic_judge", "default_judge_model"), # 添加默认模型名
-llm_summary=getattr(
-global_config, "llm_summary", "default_summary_model"
-), # 添加默认模型名
+llm_summary=getattr(global_config, "llm_summary", "default_summary_model"), # 添加默认模型名
)
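The MemoryConfig hunk just joins a wrapped call; the pattern itself reads optional attributes off `global_config` with `getattr(obj, name, default)`, so missing settings fall back to the hard-coded defaults. A small sketch, using `SimpleNamespace` as a stand-in for the real `global_config` object:

```python
# Sketch of the getattr(global_config, name, default) fallback pattern above.
from types import SimpleNamespace

global_config = SimpleNamespace(llm_topic_judge="judge-v2")  # llm_summary deliberately missing

llm_topic_judge = getattr(global_config, "llm_topic_judge", "default_judge_model")  # -> "judge-v2"
llm_summary = getattr(global_config, "llm_summary", "default_summary_model")        # -> fallback
print(llm_topic_judge, llm_summary)
```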