fix: remove unused code
@@ -1,106 +0,0 @@
import asyncio
from typing import Dict

from ..chat.chat_stream import ChatStream


class WillingManager:
    def __init__(self):
        self.chat_reply_willing: Dict[str, float] = {}  # Reply willingness per chat stream
        self._decay_task = None
        self._started = False

    async def _decay_reply_willing(self):
        """Periodically decay reply willingness."""
        while True:
            await asyncio.sleep(3)
            for chat_id in self.chat_reply_willing:
                # Every 3 seconds, decay the willingness to 60% of its current value
                self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.6)

    def get_willing(self, chat_stream: ChatStream) -> float:
        """Get the reply willingness of the given chat stream."""
        if chat_stream:
            return self.chat_reply_willing.get(chat_stream.stream_id, 0)
        return 0

    def set_willing(self, chat_id: str, willing: float):
        """Set the reply willingness of the given chat stream."""
        self.chat_reply_willing[chat_id] = willing

    async def change_reply_willing_received(
        self,
        chat_stream: ChatStream,
        topic: str = None,
        is_mentioned_bot: bool = False,
        config=None,
        is_emoji: bool = False,
        interested_rate: float = 0,
        sender_id: str = None,
    ) -> float:
        """Update the reply willingness of the given chat stream and return the reply probability."""
        chat_id = chat_stream.stream_id
        current_willing = self.chat_reply_willing.get(chat_id, 0)

        if topic and current_willing < 1:
            current_willing += 0.2
        elif topic:
            current_willing += 0.05

        if is_mentioned_bot and current_willing < 1.0:
            current_willing += 0.9
        elif is_mentioned_bot:
            current_willing += 0.05

        if is_emoji:
            current_willing *= 0.2

        self.chat_reply_willing[chat_id] = min(current_willing, 3.0)

        reply_probability = (current_willing - 0.5) * 2

        # Check group permissions (for group chats)
        if chat_stream.group_info and config:
            if chat_stream.group_info.group_id not in config.talk_allowed_groups:
                current_willing = 0
                reply_probability = 0

            if chat_stream.group_info.group_id in config.talk_frequency_down_groups:
                reply_probability = reply_probability / config.down_frequency_rate

        if is_mentioned_bot and sender_id == "1026294844":
            reply_probability = 1

        return reply_probability

    def change_reply_willing_sent(self, chat_stream: ChatStream):
        """Lower the chat stream's reply willingness after a message has been sent."""
        if chat_stream:
            chat_id = chat_stream.stream_id
            current_willing = self.chat_reply_willing.get(chat_id, 0)
            self.chat_reply_willing[chat_id] = max(0, current_willing - 1.8)

    def change_reply_willing_not_sent(self, chat_stream: ChatStream):
        """Lower the chat stream's reply willingness after deciding not to reply."""
        if chat_stream:
            chat_id = chat_stream.stream_id
            current_willing = self.chat_reply_willing.get(chat_id, 0)
            # Subtracts 0, so the willingness is currently left unchanged
            self.chat_reply_willing[chat_id] = max(0, current_willing - 0)

    def change_reply_willing_after_sent(self, chat_stream: ChatStream):
        """Raise the chat stream's reply willingness after a message has been sent."""
        if chat_stream:
            chat_id = chat_stream.stream_id
            current_willing = self.chat_reply_willing.get(chat_id, 0)
            if current_willing < 1:
                self.chat_reply_willing[chat_id] = min(1, current_willing + 0.4)

    async def ensure_started(self):
        """Ensure the decay task has been started."""
        if not self._started:
            if self._decay_task is None:
                self._decay_task = asyncio.create_task(self._decay_reply_willing())
            self._started = True


# Create the global instance
willing_manager = WillingManager()
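For context, a rough sketch of how the removed WillingManager was typically driven from message-handling code. This is an illustrative reconstruction, not part of the commit: the handle_incoming_message wrapper, the import path, the topic label, and the random-draw dispatch are assumptions.

# Hypothetical usage sketch (not from this commit) of the removed WillingManager.
import random

from .willing_manager import willing_manager  # hypothetical import path for the module-level instance


async def handle_incoming_message(chat_stream, config):
    await willing_manager.ensure_started()  # lazily start the background decay task
    reply_probability = await willing_manager.change_reply_willing_received(
        chat_stream,
        topic="daily chat",  # hypothetical topic label
        is_mentioned_bot=False,
        config=config,
    )
    if random.random() < reply_probability:
        # ... generate and send the reply, then record that we replied ...
        willing_manager.change_reply_willing_sent(chat_stream)
    else:
        willing_manager.change_reply_willing_not_sent(chat_stream)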
@@ -1,123 +0,0 @@
import asyncio
import os
import time
from typing import Tuple, Union

import aiohttp
import requests
from src.common.logger import get_module_logger

logger = get_module_logger("offline_llm")


class LLMModel:
    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")

        if not self.api_key or not self.base_url:
            raise ValueError("Environment variables not loaded correctly: SILICONFLOW_KEY or SILICONFLOW_BASE_URL is not set")

        logger.info(f"API URL: {self.base_url}")  # Log the base_url

    def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]:
        """Generate the model's response for the given prompt."""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,
        }

        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # Log the request URL

        max_retries = 3
        base_wait_time = 15  # Base wait time in seconds

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)

                if response.status_code == 429:
                    wait_time = base_wait_time * (2**retry)  # Exponential backoff
                    logger.warning(f"Rate limited (429), retrying in {wait_time} seconds...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()  # Raise on other error statuses

                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0]["message"]["content"]
                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                    return content, reasoning_content
                return "No result returned", ""

            except Exception as e:
                if retry < max_retries - 1:  # Retries remaining
                    wait_time = base_wait_time * (2**retry)
                    logger.error(f"[Reply] Request failed, retrying in {wait_time} seconds... Error: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logger.error(f"Request failed: {str(e)}")
                    return f"Request failed: {str(e)}", ""

        logger.error("Reached the maximum number of retries; the request still failed")
        return "Reached the maximum number of retries; the request still failed", ""

    async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]:
        """Asynchronously generate the model's response for the given prompt."""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,
        }

        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # Log the request URL

        max_retries = 3
        base_wait_time = 15

        async with aiohttp.ClientSession() as session:
            for retry in range(max_retries):
                try:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2**retry)  # Exponential backoff
                            logger.warning(f"Rate limited (429), retrying in {wait_time} seconds...")
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # Raise on other error statuses

                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "No result returned", ""

                except Exception as e:
                    if retry < max_retries - 1:  # Retries remaining
                        wait_time = base_wait_time * (2**retry)
                        logger.error(f"[Reply] Request failed, retrying in {wait_time} seconds... Error: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"Request failed: {str(e)}")
                        return f"Request failed: {str(e)}", ""

        logger.error("Reached the maximum number of retries; the request still failed")
        return "Reached the maximum number of retries; the request still failed", ""