better: improve logger display

SengokuCola committed on 2025-05-28 22:24:08 +08:00
parent 9bb2fe2d52
commit 6e39965c56
13 changed files with 93 additions and 75 deletions

View File

@@ -635,7 +635,7 @@ class EmojiManager:
"""获取所有表情包并初始化为MaiEmoji类对象更新 self.emoji_objects"""
try:
self._ensure_db()
logger.info("[数据库] 开始加载所有表情包记录 (Peewee)...")
logger.debug("[数据库] 开始加载所有表情包记录 (Peewee)...")
emoji_peewee_instances = Emoji.select()
emoji_objects, load_errors = _to_emoji_objects(emoji_peewee_instances)

View File

@@ -87,8 +87,6 @@ class BackgroundTaskManager:
                ),
            ]
        )
-       else:
-           logger.info("Chat mode is normal; skipping the cleanup task, private-chat activation task, and focus evaluation task")
        # Start all tasks in one place
        for task_func, log_level, log_msg, task_attr_name in task_configs:

View File

@@ -78,18 +78,13 @@ class SubHeartflow:
        logger.debug(
            f"SubHeartflow {self.chat_id} initialized: is_group={self.is_group_chat}, target_info={self.chat_target_info}"
        )
-       # --- End using utility function ---
-       # Initialize interest system (existing logic)
-       # await self.interest_chatting.initialize()
-       # logger.debug(f"{self.log_prefix} InterestChatting instance initialized.")
        # Decide the initial state based on configuration
        if global_config.chat.chat_mode == "focus":
-           logger.info(f"{self.log_prefix} Configured for focus mode; will try to enter the FOCUSED state directly.")
+           logger.debug(f"{self.log_prefix} Configured for focus mode; will try to enter the FOCUSED state directly.")
            await self.change_chat_state(ChatState.FOCUSED)
        else:  # "auto" or other modes keep the original logic or default to NORMAL
-           logger.info(f"{self.log_prefix} Configured for auto or another mode; will try to enter the NORMAL state.")
+           logger.debug(f"{self.log_prefix} Configured for auto or another mode; will try to enter the NORMAL state.")
            await self.change_chat_state(ChatState.NORMAL)
def update_last_chat_state_time(self):
@@ -281,9 +276,9 @@ class SubHeartflow:
        self.update_last_chat_state_time()
        self.history_chat_state.append((current_state, self.chat_state_last_time))
-       logger.info(
-           f"{log_prefix} 麦麦's chat state changed from {current_state.value} (lasted {int(self.chat_state_last_time)} seconds) to {new_state.value}"
-       )
+       # logger.info(
+       #     f"{log_prefix} 麦麦's chat state changed from {current_state.value} (lasted {int(self.chat_state_last_time)} seconds) to {new_state.value}"
+       # )
        self.chat_state.chat_status = new_state
        self.chat_state_last_time = 0

View File

@@ -223,8 +223,9 @@ class MessageManager:
# f"[message.apply_set_reply_logic:{message.apply_set_reply_logic},message.is_head:{message.is_head},thinking_messages_count:{thinking_messages_count},thinking_messages_length:{thinking_messages_length},message.is_private_message():{message.is_private_message()}]"
# )
if (
message.apply_set_reply_logic # 检查标记
and message.is_head
# message.apply_set_reply_logic # 检查标记
# and message.is_head
message.is_head
and (thinking_messages_count > 3 or thinking_messages_length > 200)
and not message.is_private_message()
):
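Note: the gate above now keys only on message.is_head plus queue pressure. A minimal standalone sketch of the same condition (parameter names are illustrative, not the project's API):

def passes_reply_gate(is_head: bool, thinking_count: int, thinking_length: int, is_private: bool) -> bool:
    # Head messages in group chats qualify once the thinking queue is busy:
    # more than 3 pending thinking messages, or more than 200 queued characters.
    return is_head and (thinking_count > 3 or thinking_length > 200) and not is_private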

View File

@@ -39,6 +39,8 @@ class NormalChat:
        self.chat_target_info: Optional[dict] = None
+       self.willing_amplifier = 1
+       self.start_time = time.time()

        # Other sync initializations
        self.gpt = NormalChatGenerator()
@@ -56,6 +58,8 @@ class NormalChat:
        self._disabled = False  # add a disable flag

    async def initialize(self):
        """Asynchronous initialization: fetch the chat type and target info."""
        if self._initialized:
@@ -64,7 +68,7 @@ class NormalChat:
        self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id)
        self.stream_name = chat_manager.get_stream_name(self.stream_id) or self.stream_id
        self._initialized = True
-       logger.info(f"[{self.stream_name}] NormalChat instance initialize finished (async part).")
+       logger.debug(f"[{self.stream_name}] NormalChat initialization finished (async part).")

    # Changed to an instance method
    async def _create_thinking_message(self, message: MessageRecv, timestamp: Optional[float] = None) -> str:
@@ -208,7 +212,11 @@ class NormalChat:
            for msg_id, (message, interest_value, is_mentioned) in items_to_process:
                try:
                    # Process the message
-                   self.adjust_reply_frequency()
+                   if time.time() - self.start_time > 600:
+                       self.adjust_reply_frequency(duration=600 / 60)
+                   else:
+                       self.adjust_reply_frequency(duration=(time.time() - self.start_time) / 60)

                    await self.normal_response(
                        message=message,
@@ -256,7 +264,7 @@ class NormalChat:
            logger.info(
                f"[{mes_name}]"
                f"{message.message_info.user_info.user_nickname}:"  # use self.chat_stream
-               f"{message.processed_plain_text}[reply probability: {reply_probability * 100:.1f}%]"
+               f"{message.processed_plain_text}[interest: {interested_rate:.2f}][reply probability: {reply_probability * 100:.1f}%]"
            )
            do_reply = False
            response_set = None  # initialize response_set
@@ -304,7 +312,7 @@ class NormalChat:
            willing_manager.delete(message.message_info.message_id)
            return  # skip the remaining steps
-       logger.info(f"[{self.stream_name}] Reply content: {response_set}")
+       # logger.info(f"[{self.stream_name}] Reply content: {response_set}")

        if self._disabled:
            logger.info(f"[{self.stream_name}] Disabled; ignoring normal_response.")
@@ -357,7 +365,7 @@ class NormalChat:
                trigger_msg = message.processed_plain_text
                response_msg = " ".join(response_set)
                logger.info(
-                   f"[{self.stream_name}] Trigger message: {trigger_msg[:20]}... | Reasoning message: {response_msg[:20]}... | Performance timing: {timing_str}"
+                   f"[{self.stream_name}] Replied to: {trigger_msg[:30]}... | Reply content: {response_msg[:30]}... | Timing: {timing_str}"
                )
            elif not do_reply:
                # Handle the no-reply case
@@ -376,7 +384,7 @@ class NormalChat:
        self._disabled = False  # reset the disable flag on start
        if self._chat_task is None or self._chat_task.done():
-           logger.info(f"[{self.stream_name}] Starting to process interest messages...")
+           # logger.info(f"[{self.stream_name}] Starting to process interest messages...")
polling_task = asyncio.create_task(self._reply_interested_message())
polling_task.add_done_callback(lambda t: self._handle_task_completion(t))
self._chat_task = polling_task
@@ -483,21 +491,33 @@ class NormalChat:
        Adjust the reply frequency
        """
        # Get message statistics for the last 30 minutes
-       print(f"willing_amplifier: {self.willing_amplifier}")
        stats = get_recent_message_stats(minutes=duration, chat_id=self.stream_id)
        bot_reply_count = stats["bot_reply_count"]
-       print(f"[{self.stream_name}] Replies in the last {duration} minutes: {bot_reply_count}")
        total_message_count = stats["total_message_count"]
-       print(f"[{self.stream_name}] Total messages in the last {duration} minutes: {total_message_count}")
        if total_message_count == 0:
            return
+       logger.debug(f"[{self.stream_name}]({self.willing_amplifier}) Last {duration} minutes - replies: {bot_reply_count}, total messages: {total_message_count}")
        # Compute the reply frequency
        _reply_frequency = bot_reply_count / total_message_count
+       differ = global_config.normal_chat.talk_frequency - (bot_reply_count / duration)
        # If the reply frequency is below 0.5, increase the reply probability
-       if bot_reply_count / duration < global_config.normal_chat.talk_frequency:
-           # differ = global_config.normal_chat.talk_frequency - reply_frequency
-           logger.info(f"[{self.stream_name}] Reply frequency is below {global_config.normal_chat.talk_frequency}; increasing reply probability")
-           self.willing_amplifier += 0.1
-       else:
-           logger.info(f"[{self.stream_name}] Reply frequency is above {global_config.normal_chat.talk_frequency}; decreasing reply probability")
-           self.willing_amplifier -= 0.1
+       if differ > 0.1:
+           mapped = 1 + (differ - 0.1) * 4 / 0.9
+           mapped = max(1, min(5, mapped))
+           logger.info(f"[{self.stream_name}] Reply frequency is below {global_config.normal_chat.talk_frequency}; increasing reply probability, differ={differ:.3f}, mapped={mapped:.2f}")
+           self.willing_amplifier += mapped * 0.1  # adjust the coefficient as needed
+       elif differ < -0.1:
+           mapped = 1 - (differ + 0.1) * 4 / 0.9
+           mapped = max(1, min(5, mapped))
+           logger.info(f"[{self.stream_name}] Reply frequency is above {global_config.normal_chat.talk_frequency}; decreasing reply probability, differ={differ:.3f}, mapped={mapped:.2f}")
+           self.willing_amplifier -= mapped * 0.1
+       if self.willing_amplifier > 5:
+           self.willing_amplifier = 5
+       elif self.willing_amplifier < 0.1:
+           self.willing_amplifier = 0.1
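Note on the adjustment above: instead of a fixed step of 0.1, the gap (differ) between the configured talk_frequency and the observed replies per minute is mapped linearly onto a step factor in [1, 5], with a 0.1 dead zone on either side, and the amplifier itself is clamped to [0.1, 5]. A minimal standalone sketch of that mapping (illustrative only, example numbers assumed):

def map_step(differ: float) -> float:
    """Step factor used above: |differ| in (0.1, 1.0] is mapped linearly onto [1, 5] and clamped."""
    if differ > 0.1:
        mapped = 1 + (differ - 0.1) * 4 / 0.9
    elif differ < -0.1:
        mapped = 1 - (differ + 0.1) * 4 / 0.9
    else:
        return 0.0  # inside the dead zone: no adjustment
    return max(1, min(5, mapped))

# Example: talk_frequency = 1.0 and 0.4 bot replies per minute give differ = 0.6,
# so mapped = 1 + 0.5 * 4 / 0.9 ≈ 3.22 and willing_amplifier grows by ≈ 0.32 per call,
# versus the old fixed increment of 0.1; near the target the amplifier stays put.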

View File

@@ -11,7 +11,7 @@ from src.chat.utils.info_catcher import info_catcher_manager
from src.person_info.person_info import person_info_manager
-logger = get_logger("llm")
+logger = get_logger("normal_chat_response")
class NormalChatGenerator:
@@ -40,25 +40,25 @@ class NormalChatGenerator:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
if random.random() < global_config.normal_chat.normal_chat_first_probability:
self.current_model_type = "深深地"
current_model = self.model_reasoning
self.current_model_name = current_model.model_name
else:
self.current_model_type = "浅浅的"
current_model = self.model_normal
self.current_model_name = current_model.model_name
logger.info(
f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
f"{self.current_model_name}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
) # noqa: E501
model_response = await self._generate_response_with_model(message, current_model, thinking_id)
if model_response:
logger.info(f"{global_config.bot.nickname}的回复是:{model_response}")
logger.debug(f"{global_config.bot.nickname}原始回复是:{model_response}")
model_response = await self._process_response(model_response)
return model_response
else:
logger.info(f"{self.current_model_type}思考,失败")
logger.info(f"{self.current_model_name}思考,失败")
return None
async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):

View File

@@ -100,7 +100,7 @@ class InfoCatcher:
            time_end = message_end.message_info.time
            chat_id = message_start.chat_stream.stream_id
-           print(f"Query parameters: time_start={time_start}, time_end={time_end}, chat_id={chat_id}")
+           # print(f"Query parameters: time_start={time_start}, time_end={time_end}, chat_id={chat_id}")
            messages_between_query = (
                Messages.select()
@@ -109,10 +109,10 @@ class InfoCatcher:
            )
            result = list(messages_between_query)
-           print(f"Number of query results: {len(result)}")
-           if result:
-               print(f"First message time: {result[0].time}")
-               print(f"Last message time: {result[-1].time}")
+           # print(f"Number of query results: {len(result)}")
+           # if result:
+           #     print(f"First message time: {result[0].time}")
+           #     print(f"Last message time: {result[-1].time}")
            return result
        except Exception as e:
            print(f"Error while fetching messages: {str(e)}")