diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index ebc7027e1..be15d53e8 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -23,12 +23,14 @@ jobs:
         with:
           fetch-depth: 0
           ref: ${{ github.head_ref || github.ref_name }}
-      - name: Install the latest version of ruff
+      - name: Install Ruff and Run Checks
         uses: astral-sh/ruff-action@v3
         with:
           version: "latest"
-      - run: ruff check --fix
-      - run: ruff format
+      - name: Run Ruff Fix
+        run: ruff check --fix
+      - name: Run Ruff Format
+        run: ruff format
       - name: Commit changes
         if: success()
         run: |
@@ -36,4 +38,4 @@
           git config --local user.name "github-actions[bot]"
           git add -A
           git diff --quiet && git diff --staged --quiet || git commit -m "🤖 自动格式化代码 [skip ci]"
-          git push
\ No newline at end of file
+          git push
diff --git a/MaiMBot-LPMM b/MaiMBot-LPMM
new file mode 160000
index 000000000..d5824d2f4
--- /dev/null
+++ b/MaiMBot-LPMM
@@ -0,0 +1 @@
+Subproject commit d5824d2f48c9415cf619d2b32608c2db6a1bbc39
diff --git a/docker-compose.yml b/docker-compose.yml
index 2392f707f..67750d4c1 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -23,8 +23,8 @@ services:
 #    image: infinitycat/maibot:dev
     environment:
       - TZ=Asia/Shanghai
-#      - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # 同意EULA
-#      - PRIVACY_AGREE=2402af06e133d2d10d9c6c643fdc9333 # 同意EULA
+#      - EULA_AGREE=bda99dca873f5d8044e9987eac417e01 # 同意EULA
+#      - PRIVACY_AGREE=42dddb3cbe2b784b45a2781407b298a1 # 同意EULA
 #    ports:
 #      - "8000:8000"
     volumes:
diff --git a/src/common/logger.py b/src/common/logger.py
index 616b44871..614ccdb1d 100644
--- a/src/common/logger.py
+++ b/src/common/logger.py
@@ -1076,7 +1076,7 @@ def get_module_logger(
     # 文件处理器
     log_dir = Path(current_config["log_dir"])
     log_dir.mkdir(parents=True, exist_ok=True)
-    log_file = log_dir / module_name / "{time:YYYY-MM-DD}.log"
+    log_file = log_dir / "{time:YYYY-MM-DD}.log"
     log_file.parent.mkdir(parents=True, exist_ok=True)

     file_id = logger.add(
diff --git a/src/common/tcp_connector.py b/src/common/tcp_connector.py
new file mode 100644
index 000000000..dd966e648
--- /dev/null
+++ b/src/common/tcp_connector.py
@@ -0,0 +1,9 @@
+import ssl
+import certifi
+import aiohttp
+
+ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+
+async def get_tcp_connector():
+    return aiohttp.TCPConnector(ssl=ssl_context)
diff --git a/src/individuality/not_using/offline_llm.py b/src/individuality/not_using/offline_llm.py
index cc9560011..40ec0889d 100644
--- a/src/individuality/not_using/offline_llm.py
+++ b/src/individuality/not_using/offline_llm.py
@@ -6,6 +6,7 @@ from typing import Tuple, Union
 import aiohttp
 import requests
 from src.common.logger import get_module_logger
+from src.common.tcp_connector import get_tcp_connector
 from rich.traceback import install

 install(extra_lines=3)
@@ -94,7 +95,7 @@ class LLMRequestOff:
         max_retries = 3
         base_wait_time = 15

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
             for retry in range(max_retries):
                 try:
                     async with session.post(api_url, headers=headers, json=data) as response:
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 24cc9731a..4022f9367 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -13,6 +13,7 @@ import os
 from src.common.database.database import db  # 确保 db 被导入用于 create_tables
 from src.common.database.database_model import LLMUsage  # 导入 LLMUsage 模型
 from src.config.config import global_config
+from src.common.tcp_connector import get_tcp_connector
 from rich.traceback import install

 install(extra_lines=3)
@@ -244,7 +245,7 @@ class LLMRequest:

         if stream_mode:
             payload["stream"] = stream_mode
-
+
         if self.temp != 0.7:
             payload["temperature"] = self.temp

@@ -257,13 +258,12 @@ class LLMRequest:

         if self.max_tokens:
             payload["max_tokens"] = self.max_tokens
-
+
         # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-        #     payload["max_tokens"] = global_config.model.model_max_output_length
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
         # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
-            payload["max_completion_tokens"] = payload.pop("max_tokens")
-
+            payload["max_completion_tokens"] = payload.pop("max_tokens")

         return {
             "policy": policy,
@@ -312,7 +312,7 @@ class LLMRequest:
             # 似乎是openai流式必须要的东西,不过阿里云的qwq-plus加了这个没有影响
             if request_content["stream_mode"]:
                 headers["Accept"] = "text/event-stream"
-            async with aiohttp.ClientSession() as session:
+            async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
                 async with session.post(
                     request_content["api_url"], headers=headers, json=request_content["payload"]
                 ) as response:
@@ -653,7 +653,7 @@ class LLMRequest:
             ]
         else:
             messages = [{"role": "user", "content": prompt}]
-
+
         payload = {
             "model": self.model_name,
             "messages": messages,
@@ -673,9 +673,9 @@ class LLMRequest:

         if self.max_tokens:
             payload["max_tokens"] = self.max_tokens
-
+
         # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-        #     payload["max_tokens"] = global_config.model.model_max_output_length
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
         # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
             payload["max_completion_tokens"] = payload.pop("max_tokens")
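
Note on the logger.py hunk: dropping the module_name path segment means every module now logs to a single dated file directly under log_dir. A minimal sketch of the resulting loguru behavior, assuming only that loguru is installed (the "logs" path is a placeholder; the real directory comes from current_config["log_dir"]):

from loguru import logger

# loguru expands the "{time:...}" placeholder when the sink file is
# opened, so this produces one shared file per day, e.g.
# logs/2025-06-01.log, instead of logs/<module_name>/2025-06-01.log.
logger.add("logs/{time:YYYY-MM-DD}.log")
logger.info("hello")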
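Note on the new src/common/tcp_connector.py: it routes every aiohttp.ClientSession through an SSL context built from certifi's CA bundle, presumably to avoid certificate-verification failures on hosts with a missing or stale system trust store. A minimal usage sketch mirroring the ClientSession changes above (the fetch function and URL are illustrative, not part of the patch):

import asyncio
import aiohttp
from src.common.tcp_connector import get_tcp_connector

async def fetch(url: str) -> str:
    # One fresh TCPConnector per session: aiohttp closes the connector
    # together with the session by default, so a connector must not be
    # reused across separate "async with" session blocks.
    async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
        async with session.get(url) as response:
            return await response.text()

print(asyncio.run(fetch("https://example.com")))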
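Note on the utils_model.py payload hunks: they only strip trailing whitespace around the existing max_tokens → max_completion_tokens rename, but the rule itself is easy to miss in the diff noise. A standalone sketch of that check (the set contents and the helper name are hypothetical; the patch never shows what MODELS_NEEDING_TRANSFORMATION contains):

# Hypothetical stand-in for LLMRequest.MODELS_NEEDING_TRANSFORMATION,
# which is defined elsewhere in utils_model.py.
MODELS_NEEDING_TRANSFORMATION = {"o1-mini", "o1-preview"}

def normalize_token_limit(model_name: str, payload: dict) -> dict:
    # Some endpoints reject "max_tokens" and expect "max_completion_tokens";
    # pop-and-reinsert keeps the configured limit under the accepted key.
    if model_name.lower() in MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
        payload["max_completion_tokens"] = payload.pop("max_tokens")
    return payload

assert normalize_token_limit("o1-mini", {"max_tokens": 256}) == {"max_completion_tokens": 256}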