fix: fix abnormal cycle thinking on loop exceptions

SengokuCola
2025-06-15 13:44:21 +08:00
parent 83ae078d6e
commit 11c36ef9e3
5 changed files with 241 additions and 8 deletions

View File

@@ -326,6 +326,23 @@ class HeartFChatting:
                     break
                 except Exception as e:
                     logger.error(f"{self.log_prefix} Error while processing context: {e}")
+                    # Set an error state for this cycle to prevent repeated errors downstream
+                    error_loop_info = {
+                        "loop_observation_info": {},
+                        "loop_processor_info": {},
+                        "loop_plan_info": {
+                            "action_result": {
+                                "action_type": "error",
+                                "action_data": {},
+                                "reasoning": f"Context processing failed: {e}"
+                            },
+                            "observed_messages": ""
+                        },
+                        "loop_action_info": {"action_taken": False, "reply_text": "", "command": ""},
+                    }
+                    self._current_cycle_detail.set_loop_info(error_loop_info)
+                    self._current_cycle_detail.complete_cycle()
+
                     # Context processing failed; skip this cycle
                     await asyncio.sleep(1)
                     continue
@@ -373,7 +390,7 @@ class HeartFChatting:
                 logger.info(
                     f"{self.log_prefix}thinking cycle {self._current_cycle_detail.cycle_id}, "
                     f"took: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}s, "
-                    f"action: {self._current_cycle_detail.loop_plan_info['action_result']['action_type']}"
+                    f"action: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', 'unknown action')}"
                     + (f"\ndetails: {'; '.join(timer_strings)}" if timer_strings else "")
                     + processor_time_log
                 )
@@ -386,6 +403,28 @@ class HeartFChatting:
         except Exception as e:
             logger.error(f"{self.log_prefix} Error during loop processing: {e}")
             logger.error(traceback.format_exc())
+
+            # If _current_cycle_detail exists but is unfinished, set an error state on it
+            if self._current_cycle_detail and not hasattr(self._current_cycle_detail, 'end_time'):
+                error_loop_info = {
+                    "loop_observation_info": {},
+                    "loop_processor_info": {},
+                    "loop_plan_info": {
+                        "action_result": {
+                            "action_type": "error",
+                            "action_data": {},
+                            "reasoning": f"Loop processing failed: {e}"
+                        },
+                        "observed_messages": ""
+                    },
+                    "loop_action_info": {"action_taken": False, "reply_text": "", "command": ""},
+                }
+                try:
+                    self._current_cycle_detail.set_loop_info(error_loop_info)
+                    self._current_cycle_detail.complete_cycle()
+                except Exception as inner_e:
+                    logger.error(f"{self.log_prefix} Error while setting the error state: {inner_e}")
+
             await asyncio.sleep(1)  # wait a second before continuing after an error

         except asyncio.CancelledError:
@@ -580,7 +619,14 @@ class HeartFChatting:
             return {
                 "loop_observation_info": {},
                 "loop_processor_info": {},
-                "loop_plan_info": {},
+                "loop_plan_info": {
+                    "action_result": {
+                        "action_type": "error",
+                        "action_data": {},
+                        "reasoning": f"Processing failed: {e}"
+                    },
+                    "observed_messages": ""
+                },
                 "loop_action_info": {"action_taken": False, "reply_text": "", "command": ""},
             }
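
The same defensive pattern recurs in the two files below: every reader of loop_plan_info switches from direct indexing to chained dict.get() lookups, so a cycle that ended in the error state above can no longer raise KeyError. A minimal sketch of the idea (the function name and sample values are hypothetical, not part of the commit):

    def describe_cycle(loop_plan_info: dict) -> str:
        # Direct indexing, loop_plan_info["action_result"]["action_type"],
        # raises KeyError on an empty dict; chained .get() falls back to
        # safe defaults instead.
        action_result = loop_plan_info.get("action_result", {})
        action_type = action_result.get("action_type", "unknown")
        reasoning = action_result.get("reasoning", "no reasoning provided")
        return f"{action_type}: {reasoning}"

    describe_cycle({})  # -> "unknown: no reasoning provided" instead of KeyError
    describe_cycle({"action_result": {"action_type": "error", "action_data": {},
                                      "reasoning": "context failed"}})
    # -> "error: context failed"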

View File

@@ -582,7 +582,8 @@ class ActionModifier:
         reply_sequence = []  # record the recent action sequence
         for cycle in recent_cycles:
-            action_type = cycle.loop_plan_info["action_result"]["action_type"]
+            action_result = cycle.loop_plan_info.get("action_result", {})
+            action_type = action_result.get("action_type", "unknown")
             if action_type == "no_reply":
                 no_reply_count += 1
             reply_sequence.append(action_type == "reply")

View File

@@ -42,8 +42,9 @@ class HFCloopObservation:
         # Check how many of the recent active cycles are consecutive text replies (starting from the most recent)
         for cycle in recent_active_cycles:
-            action_type = cycle.loop_plan_info["action_result"]["action_type"]
-            action_reasoning = cycle.loop_plan_info["action_result"]["reasoning"]
+            action_result = cycle.loop_plan_info.get("action_result", {})
+            action_type = action_result.get("action_type", "unknown")
+            action_reasoning = action_result.get("reasoning", "no reasoning provided")
             is_taken = cycle.loop_action_info["action_taken"]
             action_taken_time = cycle.loop_action_info["taken_time"]
             action_taken_time_str = datetime.fromtimestamp(action_taken_time).strftime("%H:%M:%S")

View File

@@ -3,6 +3,11 @@ import logging.handlers
 from pathlib import Path
 from typing import Callable, Optional
 import json
+import gzip
+import shutil
+import threading
+import time
+from datetime import datetime, timedelta
 import structlog
 import toml
@@ -19,11 +24,14 @@ def get_file_handler():
     """Get the file handler singleton."""
     global _file_handler
     if _file_handler is None:
-        _file_handler = logging.handlers.RotatingFileHandler(
+        # Use the compression-capable handler with hard-coded defaults
+        _file_handler = CompressedRotatingFileHandler(
             LOG_DIR / "app.log.jsonl",
             maxBytes=10 * 1024 * 1024,  # 10MB
             backupCount=5,
             encoding="utf-8",
+            compress=True,
+            compress_level=6,
         )
     # Set the file handler's log level
     file_level = LOG_CONFIG.get("file_log_level", LOG_CONFIG.get("log_level", "INFO"))
@@ -40,6 +48,90 @@ def get_console_handler():
         _console_handler.setLevel(getattr(logging, console_level.upper(), logging.INFO))
     return _console_handler

+
+class CompressedRotatingFileHandler(logging.handlers.RotatingFileHandler):
+    """Rotating file handler that gzip-compresses rotated files."""
+
+    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None,
+                 compress=True, compress_level=6):
+        super().__init__(filename, 'a', maxBytes, backupCount, encoding)
+        self.compress = compress
+        self.compress_level = compress_level
+
+    def doRollover(self):
+        """Perform log rotation, compressing the rotated file."""
+        if self.stream:
+            self.stream.close()
+            self.stream = None
+
+        # Only rotate when a backup count limit is configured
+        if self.backupCount > 0:
+            # Delete the oldest backup, compressed or not
+            old_gz = f"{self.baseFilename}.{self.backupCount}.gz"
+            old_file = f"{self.baseFilename}.{self.backupCount}"
+            if Path(old_gz).exists():
+                Path(old_gz).unlink()
+            if Path(old_file).exists():
+                Path(old_file).unlink()
+
+            # Shift the existing backups up by one
+            for i in range(self.backupCount - 1, 0, -1):
+                source_gz = f"{self.baseFilename}.{i}.gz"
+                dest_gz = f"{self.baseFilename}.{i + 1}.gz"
+                source_file = f"{self.baseFilename}.{i}"
+                dest_file = f"{self.baseFilename}.{i + 1}"
+                if Path(source_gz).exists():
+                    Path(source_gz).rename(dest_gz)
+                elif Path(source_file).exists():
+                    Path(source_file).rename(dest_file)
+
+            # Move the current log file aside
+            dest_file = f"{self.baseFilename}.1"
+            if Path(self.baseFilename).exists():
+                Path(self.baseFilename).rename(dest_file)
+
+                # Compress it in a background thread
+                if self.compress:
+                    threading.Thread(
+                        target=self._compress_file,
+                        args=(dest_file,),
+                        daemon=True
+                    ).start()
+
+        # Recreate the log file
+        if not self.delay:
+            self.stream = self._open()
+
+    def _compress_file(self, filepath):
+        """Compress a rotated file in the background."""
+        try:
+            source_path = Path(filepath)
+            if not source_path.exists():
+                return
+
+            # Record the size before the original file is deleted,
+            # otherwise the compression ratio below is always zero
+            original_size = source_path.stat().st_size
+
+            compressed_path = Path(f"{filepath}.gz")
+            with open(source_path, 'rb') as f_in:
+                with gzip.open(compressed_path, 'wb', compresslevel=self.compress_level) as f_out:
+                    shutil.copyfileobj(f_in, f_out)
+
+            # Remove the uncompressed original
+            source_path.unlink()
+
+            # Report completion via plain print() to avoid recursive logging
+            compressed_size = compressed_path.stat().st_size
+            ratio = (1 - compressed_size / original_size) * 100 if original_size > 0 else 0
+            print(f"[log compression] {source_path.name} -> {compressed_path.name} (saved {ratio:.1f}%)")
+        except Exception as e:
+            print(f"[log compression] failed to compress {filepath}: {e}")
+
+
 def close_handlers():
     """Safely close all handlers."""
     global _file_handler, _console_handler
@@ -750,11 +842,19 @@ def initialize_logging():
     configure_third_party_loggers()
     reconfigure_existing_loggers()

+    # Start the periodic log cleanup task
+    start_log_cleanup_task()
+
     # Output initialization info
     logger = get_logger("logger")
     console_level = LOG_CONFIG.get("console_log_level", LOG_CONFIG.get("log_level", "INFO"))
     file_level = LOG_CONFIG.get("file_log_level", LOG_CONFIG.get("log_level", "INFO"))
-    logger.info(f"Logging system reinitialized, console level: {console_level}, file level: {file_level}; all loggers configured uniformly")
+    logger.info("Logging system reinitialized:")
+    logger.info(f"  - console level: {console_level}")
+    logger.info(f"  - file level: {file_level}")
+    logger.info("  - compression: enabled")
+    logger.info("  - auto cleanup: logs older than 30 days")


 def force_initialize_logging():
@@ -811,6 +911,91 @@ def format_json_for_logging(data, indent=2, ensure_ascii=False):
     return json.dumps(data, indent=indent, ensure_ascii=ensure_ascii)

+
+def cleanup_old_logs():
+    """Delete expired log files."""
+    try:
+        cleanup_days = 30  # hard-coded to 30 days
+        cutoff_date = datetime.now() - timedelta(days=cleanup_days)
+
+        deleted_count = 0
+        deleted_size = 0
+
+        # Walk the log directory
+        for log_file in LOG_DIR.glob("*.log*"):
+            try:
+                file_time = datetime.fromtimestamp(log_file.stat().st_mtime)
+                if file_time < cutoff_date:
+                    file_size = log_file.stat().st_size
+                    log_file.unlink()
+                    deleted_count += 1
+                    deleted_size += file_size
+            except Exception as e:
+                logger = get_logger("logger")
+                logger.warning(f"Error while cleaning up log file {log_file}: {e}")
+
+        if deleted_count > 0:
+            logger = get_logger("logger")
+            logger.info(f"Removed {deleted_count} expired log files, freeing {deleted_size / 1024 / 1024:.2f} MB")
+    except Exception as e:
+        logger = get_logger("logger")
+        logger.error(f"Error while cleaning up old log files: {e}")
+
+
+def start_log_cleanup_task():
+    """Start the log cleanup task."""
+    def cleanup_task():
+        while True:
+            time.sleep(24 * 60 * 60)  # run once every 24 hours
+            cleanup_old_logs()
+
+    cleanup_thread = threading.Thread(target=cleanup_task, daemon=True)
+    cleanup_thread.start()
+
+    logger = get_logger("logger")
+    logger.info("Log cleanup task started; log files older than 30 days will be cleaned up automatically")
+
+
+def get_log_stats():
+    """Collect statistics about the log files."""
+    stats = {
+        "total_files": 0,
+        "total_size": 0,
+        "compressed_files": 0,
+        "uncompressed_files": 0,
+        "files": []
+    }
+
+    try:
+        if not LOG_DIR.exists():
+            return stats
+
+        for log_file in LOG_DIR.glob("*.log*"):
+            file_info = {
+                "name": log_file.name,
+                "size": log_file.stat().st_size,
+                "modified": datetime.fromtimestamp(log_file.stat().st_mtime).strftime("%Y-%m-%d %H:%M:%S"),
+                "compressed": log_file.suffix == ".gz"
+            }
+            stats["files"].append(file_info)
+            stats["total_files"] += 1
+            stats["total_size"] += file_info["size"]
+            if file_info["compressed"]:
+                stats["compressed_files"] += 1
+            else:
+                stats["uncompressed_files"] += 1
+
+        # Sort by modification time, newest first
+        stats["files"].sort(key=lambda x: x["modified"], reverse=True)
+    except Exception as e:
+        logger = get_logger("logger")
+        logger.error(f"Error while collecting log statistics: {e}")
+
+    return stats
+
+
 def shutdown_logging():
     """Gracefully shut down the logging system and release all file handles."""
     logger = get_logger("logger")
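
A minimal usage sketch of the new handler, assuming CompressedRotatingFileHandler is imported from this module; the tiny maxBytes value and the "demo" names are illustrative only, chosen to force a quick rollover:

    import logging

    handler = CompressedRotatingFileHandler(
        "demo.log",
        maxBytes=1024,     # rotate after ~1 KB so the demo rolls over quickly
        backupCount=3,     # keeps at most demo.log.1.gz ... demo.log.3.gz
        encoding="utf-8",
        compress=True,
        compress_level=6,  # gzip level: 1 = fastest, 9 = smallest output
    )
    demo_logger = logging.getLogger("demo")
    demo_logger.addHandler(handler)
    demo_logger.setLevel(logging.INFO)
    for i in range(200):
        demo_logger.info("line %d: padding to trigger a rollover", i)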

View File

@@ -1,5 +1,5 @@
 [inner]
-version = "2.21.0"
+version = "2.22.0"

 #---- The notes below are for developers; if you only deployed 麦麦, you do not need to read them ----
 #If you modify this config file, please bump the version value after your changes