Merge pull request #627 from lmst2/refactor

Add a time zone setting: the time zone can now be set in bot_config to shift the bot's daily routine, plus some small tweaks to the LLM logger.
SengokuCola committed 2025-04-04 14:15:12 +08:00 (committed by GitHub)
6 changed files with 51 additions and 35 deletions
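
In short: bot_config gains a time_zone option, the config loader validates it with dateutil's tz.gettz(), and the schedule generator hands the resulting tzinfo to every datetime.datetime.now() call. A minimal sketch of the mechanism, mirroring the diffs below (the time_zone value stands in for whatever bot_config.toml supplies):

import datetime
from dateutil import tz

time_zone = "Asia/Shanghai"      # value read from bot_config.toml
TIME_ZONE = tz.gettz(time_zone)  # None if the zone name is unknown
if TIME_ZONE is None:
    TIME_ZONE = tz.gettz("Asia/Shanghai")  # fall back to the default

# An aware "now": the bot's day follows the configured zone,
# not the host machine's clock.
print(datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d %H:%M:%S %Z"))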

.gitignore (vendored, 4 changes)

@@ -6,6 +6,10 @@ log/
 logs/
 /test
 /src/test
+nonebot-maibot-adapter/
+*.zip
+run.bat
+run.py
 message_queue_content.txt
 message_queue_content.bat
 message_queue_window.bat


@@ -1,6 +1,7 @@
 import os
 from dataclasses import dataclass, field
 from typing import Dict, List, Optional
+from dateutil import tz

 import tomli
 import tomlkit
@@ -151,6 +152,7 @@ class BotConfig:
     PROMPT_SCHEDULE_GEN = "无日程"
     SCHEDULE_DOING_UPDATE_INTERVAL: int = 300  # schedule update interval, in seconds
     SCHEDULE_TEMPERATURE: float = 0.5  # schedule temperature, 0.5-1.0 recommended
+    TIME_ZONE: str = "Asia/Shanghai"  # time zone

     # message
     MAX_CONTEXT_SIZE: int = 15  # maximum number of context messages
@@ -355,6 +357,11 @@ class BotConfig:
                 )
             if config.INNER_VERSION in SpecifierSet(">=1.0.2"):
                 config.SCHEDULE_TEMPERATURE = schedule_config.get("schedule_temperature", config.SCHEDULE_TEMPERATURE)
+                time_zone = schedule_config.get("time_zone", config.TIME_ZONE)
+                if tz.gettz(time_zone) is None:
+                    logger.error(f"Invalid time zone: {time_zone}, using the default instead: {config.TIME_ZONE}")
+                else:
+                    config.TIME_ZONE = time_zone

         def emoji(parent: dict):
             emoji_config = parent["emoji"]
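
tz.gettz() resolves IANA zone names and returns None for anything it cannot resolve, which is exactly what the validation above keys on. A quick check (the invalid zone name is illustrative):

from dateutil import tz

print(tz.gettz("Asia/Shanghai") is None)  # False: valid name, config value accepted
print(tz.gettz("Mars/Olympus") is None)   # True: logged as invalid, default kept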


@@ -14,7 +14,6 @@ from src.common.logger import get_module_logger, LogConfig, MEMORY_STYLE_CONFIG
 from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler  # distribution generator
 from .memory_config import MemoryConfig

-
 def get_closest_chat_from_db(length: int, timestamp: str):
     # print(f"Fetch the chat records closest to the given timestamp, length: {length}, timestamp: {timestamp}")
     # print(f"Current time: {timestamp}, converted: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp))}")


@@ -153,7 +153,7 @@ class LLM_request:
         # merge the retry policies
         default_retry = {
-            "max_retries": 3,
+            "max_retries": 10,
             "base_wait": 15,
            "retry_codes": [429, 413, 500, 503],
            "abort_codes": [400, 401, 402, 403],
@@ -179,9 +179,6 @@ class LLM_request:
         # logger.debug(f"{logger_msg}Sending request to URL: {api_url}")
         # logger.info(f"Using model: {self.model_name}")

-        # streaming flag
-        if stream_mode:
-            payload["stream"] = stream_mode

         # build the request body
         if image_base64:
@@ -189,6 +186,11 @@ class LLM_request:
         elif payload is None:
             payload = await self._build_payload(prompt)

+        # streaming flag
+        # build the payload first, then add the streaming flag
+        if stream_mode:
+            payload["stream"] = stream_mode
+
         for retry in range(policy["max_retries"]):
             try:
                 # handle the session with a context manager
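
The reorder above matters because _build_payload() returns a fresh dict, so a "stream" key set before the rebuild would be silently discarded. A minimal illustration of the failure mode (build_payload is a simplified, hypothetical stand-in):

def build_payload(prompt):
    # stand-in for LLM_request._build_payload: returns a brand-new dict
    return {"messages": [{"role": "user", "content": prompt}]}

payload = {"stream": True}      # flag set before the payload is built...
payload = build_payload("hi")   # ...is lost when the dict is replaced
assert "stream" not in payload  # hence: build first, then set the flag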
@@ -203,21 +205,21 @@ class LLM_request:
                     # handle status codes that should be retried
                     if response.status in policy["retry_codes"]:
                         wait_time = policy["base_wait"] * (2**retry)
-                        logger.warning(f"Error code: {response.status}, waiting {wait_time}s before retrying")
+                        logger.warning(f"Model {self.model_name} error code: {response.status}, waiting {wait_time}s before retrying")
                         if response.status == 413:
                             logger.warning("Request body too large, trying to compress...")
                             image_base64 = compress_base64_image_by_scale(image_base64)
                             payload = await self._build_payload(prompt, image_base64, image_format)
                         elif response.status in [500, 503]:
-                            logger.error(f"Error code: {response.status} - {error_code_mapping.get(response.status)}")
+                            logger.error(f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}")
                             raise RuntimeError("Server load too high, the model failed to recover QAQ")
                         else:
-                            logger.warning(f"Rate limited (429), waiting {wait_time}s before retrying...")
+                            logger.warning(f"Model {self.model_name} rate limited (429), waiting {wait_time}s before retrying...")
                         await asyncio.sleep(wait_time)
                         continue
                     elif response.status in policy["abort_codes"]:
-                        logger.error(f"Error code: {response.status} - {error_code_mapping.get(response.status)}")
+                        logger.error(f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}")
                         # try to fetch and log the server's detailed error message
                         try:
                             error_json = await response.json()
@@ -319,9 +321,9 @@ class LLM_request:
                                         flag_delta_content_finished = True
                             except Exception as e:
-                                logger.exception(f"Error parsing streamed output: {str(e)}")
+                                logger.exception(f"Model {self.model_name} error parsing streamed output: {str(e)}")
                     except GeneratorExit:
-                        logger.warning("Streamed output interrupted, cleaning up resources...")
+                        logger.warning(f"Model {self.model_name} streamed output interrupted, cleaning up resources...")
                         # make sure resources are cleaned up properly
                         await response.release()
                         # return the content accumulated so far
@@ -335,7 +337,7 @@ class LLM_request:
                             else self._default_response_handler(result, user_id, request_type, endpoint)
                         )
                     except Exception as e:
-                        logger.error(f"Error while processing streamed output: {str(e)}")
+                        logger.error(f"Model {self.model_name} error while processing streamed output: {str(e)}")
                         # make sure resources are cleaned up even when an error occurs
                         try:
                             await response.release()
@@ -378,21 +380,21 @@ class LLM_request:
             except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                 if retry < policy["max_retries"] - 1:
                     wait_time = policy["base_wait"] * (2**retry)
-                    logger.error(f"Network error, retrying in {wait_time}s... Error: {str(e)}")
+                    logger.error(f"Model {self.model_name} network error, retrying in {wait_time}s... Error: {str(e)}")
                     await asyncio.sleep(wait_time)
                     continue
                 else:
-                    logger.critical(f"Network error, maximum retries reached: {str(e)}")
+                    logger.critical(f"Model {self.model_name} network error, maximum retries reached: {str(e)}")
                     raise RuntimeError(f"Network request failed: {str(e)}") from e
             except Exception as e:
-                logger.critical(f"Unexpected error: {str(e)}")
+                logger.critical(f"Model {self.model_name} unexpected error: {str(e)}")
                 raise RuntimeError(f"An error occurred during the request: {str(e)}") from e
             except aiohttp.ClientResponseError as e:
                 # handle response errors raised by aiohttp
                 if retry < policy["max_retries"] - 1:
                     wait_time = policy["base_wait"] * (2**retry)
-                    logger.error(f"HTTP response error, retrying in {wait_time}s... Status: {e.status}, error: {e.message}")
+                    logger.error(f"Model {self.model_name} HTTP response error, retrying in {wait_time}s... Status: {e.status}, error: {e.message}")
                     try:
                         if hasattr(e, "response") and e.response and hasattr(e.response, "text"):
                             error_text = await e.response.text()
@@ -403,27 +405,27 @@ class LLM_request:
                                 if "error" in error_item and isinstance(error_item["error"], dict):
                                     error_obj = error_item["error"]
                                     logger.error(
-                                        f"Server error details: code={error_obj.get('code')}, "
+                                        f"Model {self.model_name} server error details: code={error_obj.get('code')}, "
                                         f"status={error_obj.get('status')}, "
                                         f"message={error_obj.get('message')}"
                                     )
                             elif isinstance(error_json, dict) and "error" in error_json:
                                 error_obj = error_json.get("error", {})
                                 logger.error(
-                                    f"Server error details: code={error_obj.get('code')}, "
+                                    f"Model {self.model_name} server error details: code={error_obj.get('code')}, "
                                     f"status={error_obj.get('status')}, "
                                     f"message={error_obj.get('message')}"
                                 )
                             else:
-                                logger.error(f"Server error response: {error_json}")
+                                logger.error(f"Model {self.model_name} server error response: {error_json}")
                         except (json.JSONDecodeError, TypeError) as json_err:
-                            logger.warning(f"Response is not valid JSON: {str(json_err)}, raw content: {error_text[:200]}")
+                            logger.warning(f"Model {self.model_name} response is not valid JSON: {str(json_err)}, raw content: {error_text[:200]}")
                         except (AttributeError, TypeError, ValueError) as parse_err:
-                            logger.warning(f"Could not parse the error response content: {str(parse_err)}")
+                            logger.warning(f"Model {self.model_name} could not parse the error response content: {str(parse_err)}")
                     await asyncio.sleep(wait_time)
                 else:
-                    logger.critical(f"HTTP response error, maximum retries reached: status: {e.status}, error: {e.message}")
+                    logger.critical(f"Model {self.model_name} HTTP response error, maximum retries reached: status: {e.status}, error: {e.message}")
                     # safely check and log the request details
                     if (
                         image_base64
@@ -440,14 +442,14 @@ class LLM_request:
                             f"{image_base64[:10]}...{image_base64[-10:]}"
                         )
                     logger.critical(f"Request headers: {await self._build_headers(no_key=True)} Request body: {payload}")
-                    raise RuntimeError(f"API request failed: status {e.status}, {e.message}") from e
+                    raise RuntimeError(f"Model {self.model_name} API request failed: status {e.status}, {e.message}") from e
             except Exception as e:
                 if retry < policy["max_retries"] - 1:
                     wait_time = policy["base_wait"] * (2**retry)
-                    logger.error(f"Request failed, retrying in {wait_time}s... Error: {str(e)}")
+                    logger.error(f"Model {self.model_name} request failed, retrying in {wait_time}s... Error: {str(e)}")
                     await asyncio.sleep(wait_time)
                 else:
-                    logger.critical(f"Request failed: {str(e)}")
+                    logger.critical(f"Model {self.model_name} request failed: {str(e)}")
                     # safely check and log the request details
                     if (
                         image_base64
@@ -464,10 +466,10 @@ class LLM_request:
                             f"{image_base64[:10]}...{image_base64[-10:]}"
                         )
                     logger.critical(f"Request headers: {await self._build_headers(no_key=True)} Request body: {payload}")
-                    raise RuntimeError(f"API request failed: {str(e)}") from e
+                    raise RuntimeError(f"Model {self.model_name} API request failed: {str(e)}") from e

-        logger.error("Maximum retries reached, request still failed")
-        raise RuntimeError("Maximum retries reached, API request still failed")
+        logger.error(f"Model {self.model_name} maximum retries reached, request still failed, error: {str(e)}")
+        raise RuntimeError(f"Model {self.model_name} maximum retries reached, API request still failed, error: {str(e)}")

     async def _transform_parameters(self, params: dict) -> dict:
         """


@@ -3,6 +3,7 @@ import os
 import sys
 from typing import Dict
 import asyncio
+from dateutil import tz

 # add the project root directory to the Python path
 root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
@@ -13,6 +14,8 @@ from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfig
 from src.plugins.models.utils_model import LLM_request  # noqa: E402
 from src.plugins.config.config import global_config  # noqa: E402

+TIME_ZONE = tz.gettz(global_config.TIME_ZONE)  # set the time zone
+
 schedule_config = LogConfig(
     # use the hippocampus-specific style
@@ -44,7 +47,7 @@ class ScheduleGenerator:
         self.personality = ""
         self.behavior = ""
-        self.start_time = datetime.datetime.now()
+        self.start_time = datetime.datetime.now(TIME_ZONE)

         self.schedule_doing_update_interval = 300  # best kept above 60
@@ -74,7 +77,7 @@ class ScheduleGenerator:
         while True:
             # print(self.get_current_num_task(1, True))
-            current_time = datetime.datetime.now()
+            current_time = datetime.datetime.now(TIME_ZONE)

             # check whether the schedule needs regenerating (the date changed)
             if current_time.date() != self.start_time.date():
@@ -100,7 +103,7 @@ class ScheduleGenerator:
         Returns:
             tuple: (today_schedule_text, today_schedule) today's schedule text and the parsed schedule dict
         """
-        today = datetime.datetime.now()
+        today = datetime.datetime.now(TIME_ZONE)
         yesterday = today - datetime.timedelta(days=1)

         # check yesterday's schedule first
@@ -156,7 +159,7 @@ class ScheduleGenerator:
         """print the full schedule"""
         if not self.today_schedule_text:
             logger.warning("Today's schedule is invalid and will be regenerated on the next run")
-            db.schedule.delete_one({"date": datetime.datetime.now().strftime("%Y-%m-%d")})
+            db.schedule.delete_one({"date": datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d")})
         else:
             logger.info("=== Today's schedule ===")
             logger.info(self.today_schedule_text)
@@ -165,7 +168,7 @@ class ScheduleGenerator:
     async def update_today_done_list(self):
         # update today_done_list in the database
-        today_str = datetime.datetime.now().strftime("%Y-%m-%d")
+        today_str = datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d")
         existing_schedule = db.schedule.find_one({"date": today_str})

         if existing_schedule:
@@ -177,7 +180,7 @@ class ScheduleGenerator:
     async def move_doing(self, mind_thinking: str = ""):
         try:
-            current_time = datetime.datetime.now()
+            current_time = datetime.datetime.now(TIME_ZONE)
             if mind_thinking:
                 doing_prompt = self.construct_doing_prompt(current_time, mind_thinking)
             else:
@@ -246,7 +249,7 @@ class ScheduleGenerator:
     def save_today_schedule_to_db(self):
         """save the schedule to the database, initializing today_done_list at the same time"""
-        date_str = datetime.datetime.now().strftime("%Y-%m-%d")
+        date_str = datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d")
         schedule_data = {
             "date": date_str,
             "schedule": self.today_schedule_text,


@@ -48,6 +48,7 @@ enable_schedule_gen = true # whether to enable the schedule generator (not yet finished)
 prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
 schedule_doing_update_interval = 900 # schedule update interval, in seconds
 schedule_temperature = 0.3 # schedule temperature, 0.3-0.6 recommended
+time_zone = "Asia/Shanghai" # set your bot's time zone; this handles the host machine being in a different zone from China's, or lets you simulate, say, an overseas student's routine

 [platforms] # required: fill in the URL provided by each platform adapter
 nonebot-qq="http://127.0.0.1:18002/api/message"