fix: change log level from trace to debug
@@ -75,7 +75,7 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
 message.processed_plain_text,
 fast_retrieval=True,
 )
-logger.trace(f"记忆激活率: {interested_rate:.2f}")
+logger.debug(f"记忆激活率: {interested_rate:.2f}")

 text_len = len(message.processed_plain_text)
 # 根据文本长度调整兴趣度,长度越大兴趣度越高,但增长率递减,最低0.01,最高0.05
@@ -287,7 +287,7 @@ class ChattingObservation(Observation):

 # print(f"构建中:self.person_list: {self.person_list}")

-logger.trace(
+logger.debug(
 f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
 )

@@ -409,7 +409,7 @@ class Hippocampus:
 activation_values[neighbor] = new_activation
 visited_nodes.add(neighbor)
 nodes_to_process.append((neighbor, new_activation, current_depth + 1))
-logger.trace(
+logger.debug(
 f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
 ) # noqa: E501

@@ -580,7 +580,7 @@ class Hippocampus:
 activation_values[neighbor] = new_activation
 visited_nodes.add(neighbor)
 nodes_to_process.append((neighbor, new_activation, current_depth + 1))
-logger.trace(
+logger.debug(
 f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
 ) # noqa: E501

@@ -733,7 +733,7 @@ class Hippocampus:

 # 对每个关键词进行扩散式检索
 for keyword in valid_keywords:
-logger.trace(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
+logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
 # 初始化激活值
 activation_values = {keyword: 1.0}
 # 记录已访问的节点
@@ -784,7 +784,7 @@ class Hippocampus:

 # 计算激活节点数与总节点数的比值
 total_activation = sum(activate_map.values())
-logger.trace(f"总激活值: {total_activation:.2f}")
+logger.debug(f"总激活值: {total_activation:.2f}")
 total_nodes = len(self.memory_graph.G.nodes())
 # activated_nodes = len(activate_map)
 activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
@@ -1605,8 +1605,8 @@ class ParahippocampalGyrus:

 if similarity >= similarity_threshold:
 logger.debug(f"[整合] 节点 '{node}' 中发现相似项 (相似度: {similarity:.2f}):")
-logger.trace(f" - '{item1}'")
-logger.trace(f" - '{item2}'")
+logger.debug(f" - '{item1}'")
+logger.debug(f" - '{item2}'")

 # 比较信息量
 info1 = calculate_information_content(item1)
@@ -33,7 +33,7 @@ class ChatBot:
 async def _ensure_started(self):
 """确保所有任务已启动"""
 if not self._started:
-logger.trace("确保ChatBot所有任务已启动")
+logger.debug("确保ChatBot所有任务已启动")

 self._started = True

@@ -166,23 +166,23 @@ class ChatBot:
 template_group_name = None

 async def preprocess():
-logger.trace("开始预处理消息...")
+logger.debug("开始预处理消息...")
 # 如果在私聊中
 if group_info is None:
-logger.trace("检测到私聊消息")
+logger.debug("检测到私聊消息")
 if global_config.experimental.pfc_chatting:
-logger.trace("进入PFC私聊处理流程")
+logger.debug("进入PFC私聊处理流程")
 # 创建聊天流
-logger.trace(f"为{user_info.user_id}创建/获取聊天流")
+logger.debug(f"为{user_info.user_id}创建/获取聊天流")
 await self.only_process_chat.process_message(message)
 await self._create_pfc_chat(message)
 # 禁止PFC,进入普通的心流消息处理逻辑
 else:
-logger.trace("进入普通心流私聊处理")
+logger.debug("进入普通心流私聊处理")
 await self.heartflow_message_receiver.process_message(message_data)
 # 群聊默认进入心流消息处理逻辑
 else:
-logger.trace(f"检测到群聊消息,群ID: {group_info.group_id}")
+logger.debug(f"检测到群聊消息,群ID: {group_info.group_id}")
 await self.heartflow_message_receiver.process_message(message_data)

 if template_group_name:
@@ -41,9 +41,9 @@ async def send_message(
 thinking_start_time=message.thinking_start_time,
 is_emoji=message.is_emoji,
 )
-# logger.trace(f"{message.processed_plain_text},{typing_time},计算输入时间结束") # 减少日志
+# logger.debug(f"{message.processed_plain_text},{typing_time},计算输入时间结束") # 减少日志
 await asyncio.sleep(typing_time)
-# logger.trace(f"{message.processed_plain_text},{typing_time},等待输入时间结束") # 减少日志
+# logger.debug(f"{message.processed_plain_text},{typing_time},等待输入时间结束") # 减少日志
 # --- 结束打字延迟 ---

 message_preview = truncate_message(message.processed_plain_text)
@@ -328,7 +328,7 @@ def process_llm_response(text: str) -> list[str]:
 # 先保护颜文字
 if global_config.response_splitter.enable_kaomoji_protection:
 protected_text, kaomoji_mapping = protect_kaomoji(text)
-logger.trace(f"保护颜文字后的文本: {protected_text}")
+logger.debug(f"保护颜文字后的文本: {protected_text}")
 else:
 protected_text = text
 kaomoji_mapping = {}
@@ -228,7 +228,7 @@ class ImageManager:
 description=description,
 timestamp=current_timestamp,
 )
-logger.trace(f"保存图片元数据: {file_path}")
+logger.debug(f"保存图片元数据: {file_path}")
 except Exception as e:
 logger.error(f"保存图片文件或元数据失败: {str(e)}")

@@ -288,7 +288,7 @@ class ImageManager:
 # 计算和上一张选中帧的差异(均方误差 MSE)
 if last_selected_frame_np is not None:
 mse = np.mean((current_frame_np - last_selected_frame_np) ** 2)
-# logger.trace(f"帧 {i} 与上一选中帧的 MSE: {mse}") # 可以取消注释来看差异值
+# logger.debug(f"帧 {i} 与上一选中帧的 MSE: {mse}") # 可以取消注释来看差异值

 # 如果差异够大,就选它!
 if mse > similarity_threshold:
@@ -183,7 +183,7 @@ class LLMRequest:
 status="success",
 timestamp=datetime.now(), # Peewee 会处理 DateTimeField
 )
-logger.trace(
+logger.debug(
 f"Token使用情况 - 模型: {self.model_name}, "
 f"用户: {user_id}, 类型: {request_type}, "
 f"提示词: {prompt_tokens}, 完成: {completion_tokens}, "
@@ -459,7 +459,7 @@ class PersonInfoManager:
 if field_name not in PersonInfo._meta.fields:
 if field_name in person_info_default:
 result[field_name] = copy.deepcopy(person_info_default[field_name])
-logger.trace(f"字段'{field_name}'不在Peewee模型中,使用默认配置值。")
+logger.debug(f"字段'{field_name}'不在Peewee模型中,使用默认配置值。")
 else:
 logger.debug(f"get_values查询失败:字段'{field_name}'未在Peewee模型和默认配置中定义。")
 result[field_name] = None
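
A minimal sketch of the effect of this change, assuming the project logs through loguru (which the logger.trace/logger.debug API suggests; the actual logger wrapper is not shown in this commit). TRACE (level 5) sits below DEBUG (level 10), so trace-level calls are silently dropped by a sink configured at the common DEBUG threshold, while the promoted debug-level calls become visible:

import sys
from loguru import logger  # assumption: loguru provides logger.trace() and logger.debug()

logger.remove()                          # drop the default sink
logger.add(sys.stderr, level="DEBUG")    # typical sink: DEBUG (10) and above

logger.trace("hidden: TRACE (5) is below the sink's DEBUG threshold")
logger.debug("shown: DEBUG (10) passes the sink's level filter")

Raising only the call sites, as this commit does, makes these messages appear under such a configuration without lowering every sink to TRACE.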