ruff fix
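The hunks below are automated lint/format fixes from Ruff: long calls get wrapped, missing spaces and trailing commas are added, redundant escapes and extra blank lines are cleaned up. A minimal sketch of how such a pass is typically run (the exact command and the project's Ruff configuration are not shown in this commit, so treat the invocation and the ~120-character line length it implies as assumptions):

    # assumed invocation; the repository's actual Ruff settings are not part of this diff
    import subprocess

    subprocess.run(["ruff", "check", "--fix", "."], check=True)  # apply autofixable lint rules
    subprocess.run(["ruff", "format", "."], check=True)          # reformat: wrap long calls, normalize spacing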
@@ -170,7 +170,6 @@ class BotConfig:
SCHEDULE_TEMPERATURE: float = 0.5 # 日程表温度,建议0.5-1.0
TIME_ZONE: str = "Asia/Shanghai" # 时区


# chat
allow_focus_mode: bool = True # 是否允许专注聊天状态

@@ -181,7 +180,6 @@ class BotConfig:

message_buffer: bool = True # 消息缓冲器


ban_words = set()
ban_msgs_regex = set()

@@ -190,11 +188,9 @@ class BotConfig:
default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
consecutive_no_reply_threshold = 3


compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除


# normal_chat
model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
@@ -434,7 +430,6 @@ class BotConfig:
config.emoji_chance = normal_chat_config.get("emoji_chance", config.emoji_chance)
config.thinking_timeout = normal_chat_config.get("thinking_timeout", config.thinking_timeout)


config.willing_mode = normal_chat_config.get("willing_mode", config.willing_mode)
config.response_willing_amplifier = normal_chat_config.get(
"response_willing_amplifier", config.response_willing_amplifier
@@ -457,9 +452,7 @@ class BotConfig:
def focus_chat(parent: dict):
focus_chat_config = parent["focus_chat"]
config.compressed_length = focus_chat_config.get("compressed_length", config.compressed_length)
-config.compress_length_limit = focus_chat_config.get(
-"compress_length_limit", config.compress_length_limit
-)
+config.compress_length_limit = focus_chat_config.get("compress_length_limit", config.compress_length_limit)
config.reply_trigger_threshold = focus_chat_config.get(
"reply_trigger_threshold", config.reply_trigger_threshold
)
@@ -470,7 +463,6 @@ class BotConfig:
"consecutive_no_reply_threshold", config.consecutive_no_reply_threshold
)


def model(parent: dict):
# 加载模型配置
model_config: dict = parent["model"]
@@ -634,7 +626,9 @@ class BotConfig:
"enable_kaomoji_protection", config.enable_kaomoji_protection
)
if config.INNER_VERSION in SpecifierSet(">=1.6.0"):
-config.model_max_output_length = response_splitter_config.get("model_max_output_length", config.model_max_output_length)
+config.model_max_output_length = response_splitter_config.get(
+"model_max_output_length", config.model_max_output_length
+)

def groups(parent: dict):
groups_config = parent["groups"]

@@ -24,6 +24,7 @@ CLEANUP_INTERVAL_SECONDS = 1200
STATE_UPDATE_INTERVAL_SECONDS = 60
LOG_INTERVAL_SECONDS = 3


class BackgroundTaskManager:
"""管理 Heartflow 的后台周期性任务。"""

@@ -32,14 +33,13 @@ class BackgroundTaskManager:
mai_state_info: MaiStateInfo, # Needs current state info
mai_state_manager: MaiStateManager,
subheartflow_manager: SubHeartflowManager,
-interest_logger: InterestLogger
+interest_logger: InterestLogger,
):
self.mai_state_info = mai_state_info
self.mai_state_manager = mai_state_manager
self.subheartflow_manager = subheartflow_manager
self.interest_logger = interest_logger


# Task references
self._state_update_task: Optional[asyncio.Task] = None
self._cleanup_task: Optional[asyncio.Task] = None
@@ -100,7 +100,7 @@ class BackgroundTaskManager:
]

# 统一启动所有任务
-for task_func,log_level, log_msg, task_attr_name in task_configs:
+for task_func, log_level, log_msg, task_attr_name in task_configs:
# 检查任务变量是否存在且未完成
current_task_var = getattr(self, task_attr_name)
if current_task_var is None or current_task_var.done():

@@ -14,6 +14,7 @@ from src.heart_flow.background_tasks import BackgroundTaskManager # Import Back

logger = get_logger("heartflow")


class Heartflow:
"""主心流协调器,负责初始化并协调各个子系统:
- 状态管理 (MaiState)

@@ -5,6 +5,7 @@ from typing import List, Tuple, Optional
from src.common.logger_manager import get_logger
from src.plugins.moods.moods import MoodManager
from src.config.config import global_config

logger = get_logger("mai_state")


@@ -19,13 +20,12 @@ base_normal_chat_num = global_config.base_normal_chat_num
base_focused_chat_num = global_config.base_focused_chat_num



-MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num/2)
+MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2)
MAX_NORMAL_CHAT_NUM_NORMAL = base_normal_chat_num
MAX_NORMAL_CHAT_NUM_FOCUSED = base_normal_chat_num + 1

# 不同状态下专注聊天的最大消息数
-MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num/2)
+MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num / 2)
MAX_FOCUSED_CHAT_NUM_NORMAL = base_focused_chat_num
MAX_FOCUSED_CHAT_NUM_FOCUSED = base_focused_chat_num + 2


@@ -5,7 +5,6 @@ import time
from typing import Optional, List, Dict, Tuple, Callable, Coroutine
import traceback
from src.common.logger_manager import get_logger
import random
from src.plugins.chat.message import MessageRecv
from src.plugins.chat.chat_stream import chat_manager
import math
@@ -153,8 +152,6 @@ class InterestChatting:
"above_threshold": self.above_threshold,
}



# --- 新增后台更新任务相关方法 ---
async def _run_update_loop(self, update_interval: float = 1.0):
"""后台循环,定期更新兴趣和回复概率。"""

@@ -31,6 +31,7 @@ logger = get_logger("subheartflow_manager")
INACTIVE_THRESHOLD_SECONDS = 3600 # 子心流不活跃超时时间(秒)
NORMAL_CHAT_TIMEOUT_SECONDS = 30 * 60 # 30分钟


class SubHeartflowManager:
"""管理所有活跃的 SubHeartflow 实例。"""

@@ -341,8 +342,7 @@ class SubHeartflowManager:
async with self._lock:
# 1. 筛选出所有 ABSENT 状态的子心流
absent_subflows = [
-hf for hf in self.subheartflows.values()
-if hf.chat_state.chat_status == ChatState.ABSENT
+hf for hf in self.subheartflows.values() if hf.chat_state.chat_status == ChatState.ABSENT
]

if not absent_subflows:
@@ -358,7 +358,9 @@ class SubHeartflowManager:
# 3. 检查 CHAT 上限
current_chat_count = self.count_subflows_by_state_nolock(ChatState.CHAT)
if current_chat_count >= chat_limit:
-logger.debug(f"{log_prefix} 想看看能不能聊,但是聊天太多了, ({current_chat_count}/{chat_limit}) 满了。")
+logger.debug(
+f"{log_prefix} 想看看能不能聊,但是聊天太多了, ({current_chat_count}/{chat_limit}) 满了。"
+)
return # 满了,这次就算了

# --- 获取 FOCUSED 计数 ---
@@ -384,15 +386,19 @@ class SubHeartflowManager:

mai_state_description = f"你当前状态: {current_mai_state.value}。"
individuality = Individuality.get_instance()
-personality_prompt = individuality.get_prompt(x_person=2, level = 2)
+personality_prompt = individuality.get_prompt(x_person=2, level=2)
prompt_personality = f"你正在扮演名为{individuality.name}的人类,{personality_prompt}"

# --- 修改:在 prompt 中加入当前聊天计数和群名信息 (条件显示) ---
chat_status_lines = []
if chatting_group_names:
-chat_status_lines.append(f"正在闲聊 ({current_chat_count}/{chat_limit}): {', '.join(chatting_group_names)}")
+chat_status_lines.append(
+f"正在闲聊 ({current_chat_count}/{chat_limit}): {', '.join(chatting_group_names)}"
+)
if focused_group_names:
-chat_status_lines.append(f"正在专注 ({current_focused_count}/{focused_limit}): {', '.join(focused_group_names)}")
+chat_status_lines.append(
+f"正在专注 ({current_focused_count}/{focused_limit}): {', '.join(focused_group_names)}"
+)

chat_status_prompt = "当前没有在任何群聊中。" # 默认消息喵~
if chat_status_lines:
@@ -406,8 +412,8 @@ class SubHeartflowManager:
f"{_observation_summary}\\n---\\n"
f"基于以上信息,你想不想开始在这个群闲聊?\\n"
f"请说明理由,并以 JSON 格式回答,包含 'decision' (布尔值) 和 'reason' (字符串)。\\n"
-f'例如:{{\"decision\": true, \"reason\": \"看起来挺热闹的,插个话\"}}\\n'
-f'例如:{{\"decision\": false, \"reason\": \"已经聊了好多,休息一下\"}}\\n'
+f'例如:{{"decision": true, "reason": "看起来挺热闹的,插个话"}}\\n'
+f'例如:{{"decision": false, "reason": "已经聊了好多,休息一下"}}\\n'
f"请只输出有效的 JSON 对象。"
)
# --- 结束修改 ---
@@ -435,7 +441,9 @@ class SubHeartflowManager:
if sub_hf_to_evaluate.chat_state.chat_status == ChatState.CHAT:
logger.debug(f"{log_prefix} 成功进入聊天状态!本次评估圆满结束。")
else:
-logger.warning(f"{log_prefix} 奇怪,尝试进入聊天状态失败了。当前状态: {sub_hf_to_evaluate.chat_state.chat_status.value}")
+logger.warning(
+f"{log_prefix} 奇怪,尝试进入聊天状态失败了。当前状态: {sub_hf_to_evaluate.chat_state.chat_status.value}"
+)
else:
logger.warning(
f"{log_prefix} AI说想聊,但是刚问完就没空位了 ({current_chat_count_before_change}/{chat_limit})。真不巧,下次再说吧。"
@@ -483,7 +491,9 @@ class SubHeartflowManager:
if time_since_last_bb > NORMAL_CHAT_TIMEOUT_SECONDS:
should_deactivate = True
reason = f"超过 {NORMAL_CHAT_TIMEOUT_SECONDS / 60:.0f} 分钟没 BB"
-logger.info(f"{log_prefix} 检测到超时 ({reason}),准备转为 ABSENT。上次活动时间: {last_bot_dong_zuo_time:.0f}")
+logger.info(
+f"{log_prefix} 检测到超时 ({reason}),准备转为 ABSENT。上次活动时间: {last_bot_dong_zuo_time:.0f}"
+)
# else:
# logger.debug(f"{log_prefix} Bot活动时间未超时 ({time_since_last_bb:.0f}s < {NORMAL_CHAT_TIMEOUT_SECONDS}s),保持 CHAT 状态。")
# else:
@@ -491,7 +501,9 @@ class SubHeartflowManager:
# logger.debug(f"{log_prefix} 未找到有效的 Bot 最后活动时间记录,不执行超时检查。")

except AttributeError:
-logger.error(f"{log_prefix} 无法获取 Bot 最后 BB 时间,请确保 SubHeartflow 相关实现正确。跳过超时检查。")
+logger.error(
+f"{log_prefix} 无法获取 Bot 最后 BB 时间,请确保 SubHeartflow 相关实现正确。跳过超时检查。"
+)
except Exception as e:
logger.error(f"{log_prefix} 检查 Bot 超时状态时出错: {e}", exc_info=True)

@@ -507,9 +519,12 @@ class SubHeartflowManager:
logger.warning(f"{log_prefix} 尝试因超时转换为 ABSENT 失败。")

if transitioned_to_absent > 0:
-logger.info(f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,{transitioned_to_absent} 个因超时转为 ABSENT。")
+logger.info(
+f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,{transitioned_to_absent} 个因超时转为 ABSENT。"
+)
# else:
# logger.debug(f"{log_prefix_task} 完成,共检查 {checked_count} 个子心流,无超时转换。")

# --- 结束新增 ---

async def _llm_evaluate_state_transition(self, prompt: str) -> Optional[bool]:
@@ -671,14 +686,20 @@ class SubHeartflowManager:
if current_chat_count < chat_limit:
target_state = ChatState.CHAT
log_reason = f"随机选择 CHAT (当前 {current_chat_count}/{chat_limit})"
-logger.debug(f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT,未达上限 ({current_chat_count}/{chat_limit})")
+logger.debug(
+f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT,未达上限 ({current_chat_count}/{chat_limit})"
+)
else:
target_state = ChatState.ABSENT
log_reason = f"随机选择 CHAT 但已达上限 ({current_chat_count}/{chat_limit}),转为 ABSENT"
-logger.debug(f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT,但已达上限 ({current_chat_count}/{chat_limit}),改为进入 ABSENT")
+logger.debug(
+f"[状态转换请求] {stream_name} ({current_state.value}) 随机决定进入 CHAT,但已达上限 ({current_chat_count}/{chat_limit}),改为进入 ABSENT"
+)

-#开始转换
-logger.info(f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})")
+# 开始转换
+logger.info(
+f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})"
+)
try:
await subflow.change_chat_state(target_state)
# 检查最终状态
@@ -686,9 +707,13 @@ class SubHeartflowManager:
if final_state == target_state:
logger.debug(f"[状态转换请求] {stream_name} 状态已成功转换为 {final_state.value}")
else:
-logger.warning(f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}")
+logger.warning(
+f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}"
+)
except Exception as e:
-logger.error(f"[状态转换请求] 转换 {stream_name} 到 {target_state.value} 时出错: {e}", exc_info=True)
+logger.error(
+f"[状态转换请求] 转换 {stream_name} 到 {target_state.value} 时出错: {e}", exc_info=True
+)
elif current_state == ChatState.ABSENT:
logger.debug(f"[状态转换请求] {stream_name} 已处于 ABSENT 状态,无需转换")
else:

@@ -315,7 +315,6 @@ class ImageManager:
logger.warning(f"计算出的目标宽度为0 (原始尺寸 {frame_width}x{frame_height}),调整为1")
target_width = 1


# 调整所有选中帧的大小
resized_frames = [
frame.resize((target_width, target_height), Image.Resampling.LANCZOS) for frame in selected_frames
@@ -332,7 +331,6 @@ class ImageManager:
logger.error("计算出的总宽度为0且无选中帧")
return None


combined_image = Image.new("RGB", (total_width, target_height))

# 水平拼接图像

@@ -982,12 +982,13 @@ class HeartFChatting:

# --- 新增:概率性忽略文本回复附带的表情(正确的位置)---


if action == "text_reply" and emoji_query:
logger.debug(f"{self.log_prefix}[Planner] 大模型想让麦麦发文字时带表情: '{emoji_query}'")
# 掷骰子看看要不要听它的
if random.random() > EMOJI_SEND_PRO:
-logger.info(f"{self.log_prefix}[Planner] 但是麦麦这次不想加表情 ({1-EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'")
+logger.info(
+f"{self.log_prefix}[Planner] 但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'"
+)
emoji_query = "" # 把表情请求清空,就不发了
else:
logger.info(f"{self.log_prefix}[Planner] 好吧,加上表情 '{emoji_query}'")

@@ -44,7 +44,5 @@ class MemoryConfig:
consolidate_memory_percentage=getattr(global_config, "consolidate_memory_percentage", 0.01),
consolidate_memory_interval=getattr(global_config, "consolidate_memory_interval", 1000),
llm_topic_judge=getattr(global_config, "llm_topic_judge", "default_judge_model"), # 添加默认模型名
-llm_summary=getattr(
-global_config, "llm_summary", "default_summary_model"
-), # 添加默认模型名
+llm_summary=getattr(global_config, "llm_summary", "default_summary_model"), # 添加默认模型名
)