fix: improve logger

SengokuCola
2025-04-13 21:42:08 +08:00
parent 4c21785eb5
commit b97c2b320d
8 changed files with 41 additions and 64 deletions
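This commit demotes a set of chatty per-message logger.info / logger.debug calls to logger.trace so they stay out of normal output. A minimal sketch of the effect, assuming the project's logger is loguru-based (loguru ships a built-in TRACE level below DEBUG); the sink configuration and messages below are illustrative, not taken from the repository:

import sys
from loguru import logger

logger.remove()                        # drop loguru's default sink
logger.add(sys.stderr, level="DEBUG")  # normal run: DEBUG and above

logger.trace("processing message ...")  # hidden: TRACE sits below DEBUG
logger.info("ChatBot started")          # still shown

# To see the demoted per-message logs again, lower the sink threshold:
logger.remove()
logger.add(sys.stderr, level="TRACE")
logger.trace("processing message ...")  # now visible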

View File

@@ -38,7 +38,7 @@ class ChatBot:
     async def _ensure_started(self):
         """Ensure all tasks have started."""
         if not self._started:
-            logger.info("Ensuring all ChatBot tasks have started")
+            logger.trace("Ensuring all ChatBot tasks have started")
             self._started = True
@@ -84,7 +84,7 @@ class ChatBot:
         message = MessageRecv(message_data)
         groupinfo = message.message_info.group_info
         userinfo = message.message_info.user_info
-        logger.debug(f"Processing message: {str(message_data)[:120]}...")
+        logger.trace(f"Processing message: {str(message_data)[:120]}...")
         if userinfo.user_id in global_config.ban_user_id:
             logger.debug(f"User {userinfo.user_id} is banned from replies")

View File

@@ -70,9 +70,9 @@ class Message_Sender:
             thinking_start_time=message.thinking_start_time,
             is_emoji=message.is_emoji,
         )
-        logger.debug(f"{message.processed_plain_text},{typing_time}, finished calculating typing time")
+        logger.trace(f"{message.processed_plain_text},{typing_time}, finished calculating typing time")
         await asyncio.sleep(typing_time)
-        logger.debug(f"{message.processed_plain_text},{typing_time}, finished waiting for typing time")
+        logger.trace(f"{message.processed_plain_text},{typing_time}, finished waiting for typing time")
         message_json = message.to_dict()

View File

@@ -184,24 +184,24 @@ class ThinkFlowChat:
             heartflow.create_subheartflow(chat.stream_id)
         await message.process()
-        logger.debug(f"Message processed successfully: {message.processed_plain_text}")
+        logger.trace(f"Message processed successfully: {message.processed_plain_text}")
         # Ban-word / regex filtering
         if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
             message.raw_message, chat, userinfo
         ):
             return
-        logger.debug(f"Ban-word/regex filter passed: {message.processed_plain_text}")
+        logger.trace(f"Ban-word/regex filter passed: {message.processed_plain_text}")
         await self.storage.store_message(message, chat)
-        logger.debug(f"Message stored successfully: {message.processed_plain_text}")
+        logger.trace(f"Message stored successfully: {message.processed_plain_text}")
         # Memory activation
         with Timer("Memory activation", timing_results):
             interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
                 message.processed_plain_text, fast_retrieval=True
             )
-        logger.debug(f"Memory activation: {interested_rate}")
+        logger.trace(f"Memory activation: {interested_rate}")
         # The buffer query result merges previously skipped messages and may change processed_plain_text
         buffer_result = await message_buffer.query_buffer_result(message)

View File

@@ -1301,7 +1301,7 @@ class Hippocampus:
         # Run spreading retrieval for each keyword
         for keyword in valid_keywords:
-            logger.debug(f"Starting spreading retrieval centered on keyword '{keyword}' (max depth: {max_depth}):")
+            logger.trace(f"Starting spreading retrieval centered on keyword '{keyword}' (max depth: {max_depth}):")
             # Initialize activation values
             activation_values = {keyword: 1.0}
             # Track visited nodes
@@ -1352,7 +1352,7 @@ class Hippocampus:
         # Compute the ratio of activated nodes to total nodes
         total_activation = sum(activate_map.values())
-        logger.info(f"Total activation: {total_activation:.2f}")
+        logger.trace(f"Total activation: {total_activation:.2f}")
         total_nodes = len(self.memory_graph.G.nodes())
         # activated_nodes = len(activate_map)
         activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0

View File

@@ -264,17 +264,17 @@ class PersonInfoManager:
                     msg_interval = int(round(np.percentile(filtered, 80)))
                     await self.update_one_field(person_id, "msg_interval", msg_interval)
-                    logger.debug(f"msg_interval for user {person_id} has been updated to {msg_interval}")
+                    logger.trace(f"msg_interval for user {person_id} has been updated to {msg_interval}")
                 except Exception as e:
-                    logger.debug(f"Message-interval calculation failed for user {person_id}: {type(e).__name__}: {str(e)}")
+                    logger.trace(f"Message-interval calculation failed for user {person_id}: {type(e).__name__}: {str(e)}")
                     continue
             # Other...
             if msg_interval_map:
-                logger.info("Distribution plot saved to: logs/person_info")
+                logger.trace("Distribution plot saved to: logs/person_info")
             current_time = datetime.datetime.now()
-            logger.info(f"Person info inference finished: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
+            logger.trace(f"Person info inference finished: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
             await asyncio.sleep(86400)
         except Exception as e:
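For context on the value logged in the last hunk: msg_interval is the 80th percentile of a user's collected message gaps, rounded to an integer. A hypothetical worked example (the sample data and units are made up; they depend on how the intervals are gathered upstream):

import numpy as np

filtered = [1000, 1500, 2000, 2500, 3000, 3500]          # made-up gaps between messages
msg_interval = int(round(np.percentile(filtered, 80)))   # 80th percentile, linear interpolation
print(msg_interval)  # -> 3000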