From 9c23a1b183b940a8ecfa03c2b95fb105fffa964b Mon Sep 17 00:00:00 2001
From: NepPure
Date: Tue, 4 Mar 2025 11:03:57 +0800
Subject: [PATCH 1/2] ci: add build cache (#43)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .github/workflows/docker-image.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 74e6a8cb4..669fb8a1e 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -34,3 +34,5 @@ jobs:
             ${{ secrets.DOCKERHUB_USERNAME }}/maimbot:${{ github.ref_name }}
             ${{ secrets.DOCKERHUB_USERNAME }}/maimbot:latest
           push: true
+          cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:latest
+          cache-to: type=inline
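
Note on the two keys added above: cache-from seeds the build with layers pulled
from the previously pushed latest image, and cache-to: type=inline writes the
cache metadata into the pushed image itself, so no separate cache storage is
needed. A minimal sketch of how these keys sit inside a docker/build-push-action
step (the step name and surrounding layout are assumptions for illustration,
not part of this patch):

    # Hypothetical surrounding step; only the two cache-* keys come from this patch.
    - name: Build and push
      uses: docker/build-push-action@v5
      with:
        push: true
        tags: ${{ secrets.DOCKERHUB_USERNAME }}/maimbot:latest
        # Reuse layers of the last pushed image as the build cache
        cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:latest
        # Embed the cache metadata inline in the pushed image
        cache-to: type=inline
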
错误: {str(e)}") await asyncio.sleep(wait_time) else: + logger.error(f"请求失败: {str(e)}") return f"请求失败: {str(e)}", "" - return "达到最大重试次数,请求仍然失败", "" \ No newline at end of file + logger.error("达到最大重试次数,请求仍然失败") + return "达到最大重试次数,请求仍然失败", "" diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py index 54be3be34..3021b11d7 100644 --- a/src/plugins/models/utils_model.py +++ b/src/plugins/models/utils_model.py @@ -4,18 +4,21 @@ import requests import time from typing import Tuple, Union from nonebot import get_driver +from loguru import logger from ..chat.config import global_config + driver = get_driver() config = driver.config class LLM_request: - def __init__(self, model ,**kwargs): + def __init__(self, model, **kwargs): # 将大写的配置键转换为小写并从config中获取实际值 try: self.api_key = getattr(config, model["key"]) self.base_url = getattr(config, model["base_url"]) except AttributeError as e: - raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") + logger.error(f"配置错误:找不到对应的配置项 - {str(e)}") + raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e self.model_name = model["name"] self.params = kwargs @@ -35,6 +38,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" + logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL max_retries = 3 base_wait_time = 15 @@ -45,7 +49,7 @@ class LLM_request: async with session.post(api_url, headers=headers, json=data) as response: if response.status == 429: wait_time = base_wait_time * (2 ** retry) # 指数退避 - print(f"遇到请求限制(429),等待{wait_time}秒后重试...") + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") await asyncio.sleep(wait_time) continue @@ -61,11 +65,13 @@ class LLM_request: except Exception as e: if retry < max_retries - 1: # 如果还有重试机会 wait_time = base_wait_time * (2 ** retry) - print(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}", exc_info=True) await asyncio.sleep(wait_time) else: + logger.critical(f"请求失败: {str(e)}", exc_info=True) return f"请求失败: {str(e)}", "" + logger.error("达到最大重试次数,请求仍然失败") return "达到最大重试次数,请求仍然失败", "" async def generate_response_for_image(self, prompt: str, image_base64: str) -> Tuple[str, str]: @@ -100,6 +106,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" + logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL max_retries = 3 base_wait_time = 15 @@ -110,7 +117,7 @@ class LLM_request: async with session.post(api_url, headers=headers, json=data) as response: if response.status == 429: wait_time = base_wait_time * (2 ** retry) # 指数退避 - print(f"遇到请求限制(429),等待{wait_time}秒后重试...") + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") await asyncio.sleep(wait_time) continue @@ -126,11 +133,13 @@ class LLM_request: except Exception as e: if retry < max_retries - 1: # 如果还有重试机会 wait_time = base_wait_time * (2 ** retry) - print(f"[image回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + logger.error(f"[image回复]请求失败,等待{wait_time}秒后重试... 
错误: {str(e)}", exc_info=True) await asyncio.sleep(wait_time) else: + logger.critical(f"请求失败: {str(e)}", exc_info=True) return f"请求失败: {str(e)}", "" + logger.error("达到最大重试次数,请求仍然失败") return "达到最大重试次数,请求仍然失败", "" def generate_response_for_image_sync(self, prompt: str, image_base64: str) -> Tuple[str, str]: @@ -165,6 +174,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" + logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL max_retries = 2 base_wait_time = 6 @@ -174,8 +184,8 @@ class LLM_request: response = requests.post(api_url, headers=headers, json=data, timeout=30) if response.status_code == 429: - wait_time = base_wait_time * (2 ** retry) # 指数退避 - print(f"遇到请求限制(429),等待{wait_time}秒后重试...") + wait_time = base_wait_time * (2 ** retry) + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") time.sleep(wait_time) continue @@ -191,11 +201,13 @@ class LLM_request: except Exception as e: if retry < max_retries - 1: # 如果还有重试机会 wait_time = base_wait_time * (2 ** retry) - print(f"[image_sync回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + logger.error(f"[image_sync回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}", exc_info=True) time.sleep(wait_time) else: + logger.critical(f"请求失败: {str(e)}", exc_info=True) return f"请求失败: {str(e)}", "" + logger.error("达到最大重试次数,请求仍然失败") return "达到最大重试次数,请求仍然失败", "" def get_embedding_sync(self, text: str, model: str = "BAAI/bge-m3") -> Union[list, None]: @@ -220,6 +232,7 @@ class LLM_request: } api_url = f"{self.base_url.rstrip('/')}/embeddings" + logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL max_retries = 2 base_wait_time = 6 @@ -230,7 +243,7 @@ class LLM_request: if response.status_code == 429: wait_time = base_wait_time * (2 ** retry) - print(f"遇到请求限制(429),等待{wait_time}秒后重试...") + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") time.sleep(wait_time) continue @@ -244,13 +257,13 @@ class LLM_request: except Exception as e: if retry < max_retries - 1: wait_time = base_wait_time * (2 ** retry) - print(f"[embedding_sync]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + logger.error(f"[embedding_sync]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}", exc_info=True) time.sleep(wait_time) else: - print(f"embedding请求失败: {str(e)}") + logger.critical(f"embedding请求失败: {str(e)}", exc_info=True) return None - print("达到最大重试次数,embedding请求仍然失败") + logger.error("达到最大重试次数,embedding请求仍然失败") return None async def get_embedding(self, text: str, model: str = "BAAI/bge-m3") -> Union[list, None]: @@ -275,6 +288,7 @@ class LLM_request: } api_url = f"{self.base_url.rstrip('/')}/embeddings" + logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL max_retries = 3 base_wait_time = 15 @@ -285,7 +299,7 @@ class LLM_request: async with session.post(api_url, headers=headers, json=data) as response: if response.status == 429: wait_time = base_wait_time * (2 ** retry) - print(f"遇到请求限制(429),等待{wait_time}秒后重试...") + logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...") await asyncio.sleep(wait_time) continue @@ -299,11 +313,11 @@ class LLM_request: except Exception as e: if retry < max_retries - 1: wait_time = base_wait_time * (2 ** retry) - print(f"[embedding]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}") + logger.error(f"[embedding]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}", exc_info=True) await asyncio.sleep(wait_time) else: - print(f"embedding请求失败: {str(e)}") + logger.critical(f"embedding请求失败: {str(e)}", exc_info=True) return None - print("达到最大重试次数,embedding请求仍然失败") - return None + logger.error("达到最大重试次数,embedding请求仍然失败") + return None \ No newline at end of file