SengokuCola
2025-06-05 16:15:43 +08:00
7 changed files with 30 additions and 17 deletions

View File

@@ -23,12 +23,14 @@ jobs:
         with:
           fetch-depth: 0
           ref: ${{ github.head_ref || github.ref_name }}
-      - name: Install the latest version of ruff
+      - name: Install Ruff and Run Checks
         uses: astral-sh/ruff-action@v3
         with:
          version: "latest"
-      - run: ruff check --fix
-      - run: ruff format
+      - name: Run Ruff Fix
+        run: ruff check --fix
+      - name: Run Ruff Format
+        run: ruff format
       - name: Commit changes
         if: success()
         run: |
@@ -36,4 +38,4 @@ jobs:
           git config --local user.name "github-actions[bot]"
           git add -A
           git diff --quiet && git diff --staged --quiet || git commit -m "🤖 Auto-format code [skip ci]"
-          git push
+          git push
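
The rework splits the single unnamed Ruff step into named steps, so fix, format, and commit failures show up separately in the Actions log. To reproduce the same two passes locally before pushing, a minimal sketch (assuming ruff is on PATH; the target directory is a placeholder):

import subprocess

# same two passes as the workflow: autofix lint findings, then reformat
for cmd in (["ruff", "check", "--fix", "."], ["ruff", "format", "."]):
    subprocess.run(cmd, check=True)  # check=True raises CalledProcessError on a non-zero exit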

MaiMBot-LPMM Submodule

Submodule MaiMBot-LPMM added at d5824d2f48

View File

@@ -23,8 +23,8 @@ services:
     # image: infinitycat/maibot:dev
     environment:
       - TZ=Asia/Shanghai
-      # - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # agree to the EULA
-      # - PRIVACY_AGREE=2402af06e133d2d10d9c6c643fdc9333 # agree to the privacy policy
+      # - EULA_AGREE=bda99dca873f5d8044e9987eac417e01 # agree to the EULA
+      # - PRIVACY_AGREE=42dddb3cbe2b784b45a2781407b298a1 # agree to the privacy policy
     # ports:
     #   - "8000:8000"
     volumes:

View File

@@ -1076,7 +1076,7 @@ def get_module_logger(
     # file handler
     log_dir = Path(current_config["log_dir"])
     log_dir.mkdir(parents=True, exist_ok=True)
-    log_file = log_dir / module_name / "{time:YYYY-MM-DD}.log"
+    log_file = log_dir / "{time:YYYY-MM-DD}.log"
     log_file.parent.mkdir(parents=True, exist_ok=True)
     file_id = logger.add(
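
Dropping the module_name path segment puts every module's daily log directly in log_dir instead of one subdirectory per module. A minimal sketch of the resulting loguru sink; the "logs" directory and the rotation argument are assumptions, since the actual logger.add(...) call is truncated in this hunk:

from pathlib import Path

from loguru import logger

log_dir = Path("logs")  # stand-in for current_config["log_dir"]
log_dir.mkdir(parents=True, exist_ok=True)
# loguru expands {time:YYYY-MM-DD} when the sink writes, yielding one file per day
file_id = logger.add(log_dir / "{time:YYYY-MM-DD}.log", rotation="00:00")
logger.info("written to today's file")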

View File

@@ -0,0 +1,9 @@
+import ssl
+
+import certifi
+import aiohttp
+
+ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+async def get_tcp_connector():
+    return aiohttp.TCPConnector(ssl=ssl_context)
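
The new module builds a single SSL context from certifi's CA bundle and hands out a fresh TCPConnector per call, which matters because ClientSession closes its connector on exit by default, so one shared connector instance could not be reused across sessions. A hedged usage sketch (the URL is a placeholder):

import asyncio

import aiohttp

from src.common.tcp_connector import get_tcp_connector

async def fetch(url: str) -> str:
    # one fresh connector per session; the shared piece is the module-level ssl_context
    async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
        async with session.get(url) as resp:
            return await resp.text()

print(asyncio.run(fetch("https://example.com")))

Pinning verification to certifi's bundle is a common fix for SSLCertVerificationError on machines whose system certificate store is missing or stale.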

View File

@@ -6,6 +6,7 @@ from typing import Tuple, Union
 import aiohttp
 import requests
 from src.common.logger import get_module_logger
+from src.common.tcp_connector import get_tcp_connector
 from rich.traceback import install
 
 install(extra_lines=3)
@@ -94,7 +95,7 @@ class LLMRequestOff:
         max_retries = 3
         base_wait_time = 15
 
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
             for retry in range(max_retries):
                 try:
                     async with session.post(api_url, headers=headers, json=data) as response:
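
Only the session construction changes in this hunk; the surrounding retry loop is untouched. A compressed sketch of how the two compose. The backoff schedule and the exception handling are assumptions, since the loop body is truncated here:

import asyncio

import aiohttp

from src.common.tcp_connector import get_tcp_connector

async def post_with_retries(api_url: str, headers: dict, data: dict,
                            max_retries: int = 3, base_wait_time: int = 15) -> dict:
    async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
        for retry in range(max_retries):
            try:
                async with session.post(api_url, headers=headers, json=data) as response:
                    response.raise_for_status()
                    return await response.json()
            except aiohttp.ClientError:
                if retry == max_retries - 1:
                    raise  # attempts exhausted, surface the error
                await asyncio.sleep(base_wait_time * (retry + 1))  # assumed linear backoff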

View File

@@ -13,6 +13,7 @@ import os
 from src.common.database.database import db  # ensure db is imported for create_tables
 from src.common.database.database_model import LLMUsage  # import the LLMUsage model
 from src.config.config import global_config
+from src.common.tcp_connector import get_tcp_connector
 from rich.traceback import install
 
 install(extra_lines=3)
@@ -244,7 +245,7 @@ class LLMRequest:
         if stream_mode:
             payload["stream"] = stream_mode
 
         if self.temp != 0.7:
             payload["temperature"] = self.temp
@@ -257,13 +258,12 @@ class LLMRequest:
         if self.max_tokens:
             payload["max_tokens"] = self.max_tokens
 
         # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-        #     payload["max_tokens"] = global_config.model.model_max_output_length
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
         # if max_tokens still exists in the payload and needs conversion, re-check it here
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
-            payload["max_completion_tokens"] = payload.pop("max_tokens")
+            payload["max_completion_tokens"] = payload.pop("max_tokens")
 
         return {
             "policy": policy,
@@ -312,7 +312,7 @@ class LLMRequest:
         # seems to be required for OpenAI streaming; adding it had no effect on Aliyun's qwq-plus
         if request_content["stream_mode"]:
             headers["Accept"] = "text/event-stream"
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
             async with session.post(
                 request_content["api_url"], headers=headers, json=request_content["payload"]
             ) as response:
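
Accept: text/event-stream requests a server-sent-events response. A hedged sketch of consuming such a stream through the shared connector; the data: framing follows the SSE convention, and the real response handling is not visible in this hunk:

import aiohttp

from src.common.tcp_connector import get_tcp_connector

async def stream_events(api_url: str, headers: dict, payload: dict):
    headers = {**headers, "Accept": "text/event-stream"}
    async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
        async with session.post(api_url, headers=headers, json=payload) as response:
            async for raw in response.content:  # the StreamReader iterates line by line
                line = raw.decode("utf-8").strip()
                if line.startswith("data: "):
                    yield line[len("data: "):]  # the JSON chunk after the SSE prefix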
@@ -653,7 +653,7 @@ class LLMRequest:
             ]
         else:
             messages = [{"role": "user", "content": prompt}]
 
         payload = {
             "model": self.model_name,
             "messages": messages,
@@ -673,9 +673,9 @@ class LLMRequest:
         if self.max_tokens:
             payload["max_tokens"] = self.max_tokens
 
         # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-        #     payload["max_tokens"] = global_config.model.model_max_output_length
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
         # if max_tokens still exists in the payload and needs conversion, re-check it here
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
             payload["max_completion_tokens"] = payload.pop("max_tokens")