Merge branch 'master' of https://github.com/MaiBot-Plus/MaiMbot-Pro-Max
__main__.py (new normal file, 20 lines added)
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+"""Main entry point for the Bot project"""
+
+if __name__ == "__main__":
+    # Set up the Python path and execute bot.py
+    import sys
+    import os
+    from pathlib import Path
+
+    # Add the current directory to the Python path
+    current_dir = Path(__file__).parent
+    sys.path.insert(0, str(current_dir))
+
+    # Execute the code from bot.py
+    bot_file = current_dir / "bot.py"
+    with open(bot_file, 'r', encoding='utf-8') as f:
+        exec(f.read())
+
+
+# This file exists only to support the one-click package; it serves no purpose outside that project
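For illustration only, a roughly equivalent entry point can be sketched with `runpy` instead of `exec`; this assumes the same layout (`bot.py` sitting next to `__main__.py`) and is a sketch, not what the commit above uses:

    import sys
    import runpy
    from pathlib import Path

    if __name__ == "__main__":
        current_dir = Path(__file__).parent
        sys.path.insert(0, str(current_dir))  # make sibling modules importable
        # Executes bot.py with __name__ == "__main__", similar to the exec() approach above
        runpy.run_path(str(current_dir / "bot.py"), run_name="__main__")
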
@@ -26,6 +26,8 @@ class WakeUpManager:
         self.angry_start_time = 0.0  # Time when the angry state started
         self.last_decay_time = time.time()  # Time of the last decay
         self._decay_task: Optional[asyncio.Task] = None
+        self.last_log_time = 0
+        self.log_interval = 30

         # Read parameters from the config file
         wakeup_config = global_config.wakeup_system
@@ -123,7 +125,12 @@ class WakeUpManager:
                 # The group chat was not @-mentioned; do not increase the wakeup value
                 return False

-        logger.info(f"{self.context.log_prefix} 唤醒度变化: {old_value:.1f} -> {self.wakeup_value:.1f} (阈值: {self.wakeup_threshold})")
+        current_time = time.time()
+        if current_time - self.last_log_time > self.log_interval:
+            logger.info(f"{self.context.log_prefix} 唤醒度变化: {old_value:.1f} -> {self.wakeup_value:.1f} (阈值: {self.wakeup_threshold})")
+            self.last_log_time = current_time
+        else:
+            logger.debug(f"{self.context.log_prefix} 唤醒度变化: {old_value:.1f} -> {self.wakeup_value:.1f} (阈值: {self.wakeup_threshold})")

         # Check whether the wakeup threshold has been reached
         if self.wakeup_value >= self.wakeup_threshold:
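The change above rate-limits the wakeup-value log line: it is emitted at INFO at most once per `log_interval` seconds and demoted to DEBUG otherwise. A minimal standalone sketch of the same pattern, with hypothetical names and the standard logging module:

    import time
    import logging

    logger = logging.getLogger("wakeup")

    class ThrottledLog:
        """Emit a message at INFO at most once per interval; demote the rest to DEBUG."""

        def __init__(self, interval: float = 30.0):
            self.interval = interval
            self.last_log_time = 0.0

        def log(self, message: str) -> None:
            now = time.time()
            if now - self.last_log_time > self.interval:
                logger.info(message)
                self.last_log_time = now
            else:
                logger.debug(message)
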
@@ -130,6 +130,8 @@ class VideoAnalyzer:
         # Newly added thread-pool configuration
         self.use_multiprocessing = getattr(config, 'use_multiprocessing', True)
         self.max_workers = getattr(config, 'max_workers', 2)
+        self.frame_extraction_mode = getattr(config, 'frame_extraction_mode', 'fixed_number')
+        self.frame_interval_seconds = getattr(config, 'frame_interval_seconds', 2.0)

         # Map the mode from the config file to the internal mode name
         config_mode = config.analysis_mode
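The new options are read with `getattr` and a default, so a config object that predates them still works. A small sketch of that fallback behavior (the `_LegacyConfig` class is hypothetical):

    class _LegacyConfig:
        # An older config object that has no frame-extraction settings yet
        analysis_mode = "batch_frames"

    config = _LegacyConfig()
    frame_extraction_mode = getattr(config, 'frame_extraction_mode', 'fixed_number')  # -> 'fixed_number'
    frame_interval_seconds = getattr(config, 'frame_interval_seconds', 2.0)           # -> 2.0
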
@@ -163,6 +165,8 @@ class VideoAnalyzer:
             self.enable_frame_timing = True
             self.use_multiprocessing = True  # Thread pool enabled by default
             self.max_workers = 2  # At most 2 worker threads by default
+            self.frame_extraction_mode = "fixed_number"
+            self.frame_interval_seconds = 2.0
             self.batch_analysis_prompt = """请分析这个视频的内容。这些图片是从视频中按时间顺序提取的关键帧。

 请提供详细的分析,包括:
@@ -314,6 +318,8 @@ class VideoAnalyzer:
     async def _extract_frames_fallback(self, video_path: str) -> List[Tuple[str, float]]:
         """Fallback frame extraction - the original async version"""
         frames = []
+        frame_count = 0
+        extracted_count = 0
         cap = cv2.VideoCapture(video_path)
         fps = cap.get(cv2.CAP_PROP_FPS)
         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
@@ -321,60 +327,96 @@ class VideoAnalyzer:

         logger.info(f"视频信息: {total_frames}帧, {fps:.2f}FPS, {duration:.2f}秒")

-        # Use numpy to optimize the frame-interval calculation
-        if duration > 0:
-            frame_interval = max(1, int(duration / self.max_frames * fps))
-        else:
-            frame_interval = 30  # default interval
-
-        logger.info(f"计算得出帧间隔: {frame_interval} (将提取约{min(self.max_frames, total_frames // frame_interval + 1)}帧)")
-
-        # Use numpy to compute the target frame positions
-        target_frames = np.arange(0, min(self.max_frames, total_frames // frame_interval + 1)) * frame_interval
-        target_frames = target_frames[target_frames < total_frames].astype(int)
-
-        extracted_count = 0
-
-        for target_frame in target_frames:
-            # Seek to the target frame
-            cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
-            ret, frame = cap.read()
-            if not ret:
-                continue
-
-            # Use numpy to optimize image processing
-            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-            # Convert to a PIL image and compute the size with numpy
-            height, width = frame_rgb.shape[:2]
-            max_dim = max(height, width)
-
-            if max_dim > self.max_image_size:
-                # Compute the scaling ratio with numpy
-                ratio = self.max_image_size / max_dim
-                new_width = int(width * ratio)
-                new_height = int(height * ratio)
-
-                # Resize efficiently with OpenCV
-                frame_resized = cv2.resize(frame_rgb, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)
-                pil_image = Image.fromarray(frame_resized)
-            else:
-                pil_image = Image.fromarray(frame_rgb)
-
-            # Convert to base64
-            buffer = io.BytesIO()
-            pil_image.save(buffer, format='JPEG', quality=self.frame_quality)
-            frame_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
-
-            # Compute the timestamp
-            timestamp = target_frame / fps if fps > 0 else 0
-            frames.append((frame_base64, timestamp))
-            extracted_count += 1
-
-            logger.debug(f"提取第{extracted_count}帧 (时间: {timestamp:.2f}s, 帧号: {target_frame})")
-
-            # Yield to the event loop once per extracted frame
-            await asyncio.sleep(0.001)
+        if self.frame_extraction_mode == "time_interval":
+            # New mode: extract frames at fixed time intervals
+            time_interval = self.frame_interval_seconds
+            next_frame_time = 0.0
+
+            while cap.isOpened():
+                ret, frame = cap.read()
+                if not ret:
+                    break
+
+                current_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
+
+                if current_time >= next_frame_time:
+                    # Convert to a PIL image and compress
+                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                    pil_image = Image.fromarray(frame_rgb)
+
+                    # Resize the image
+                    if max(pil_image.size) > self.max_image_size:
+                        ratio = self.max_image_size / max(pil_image.size)
+                        new_size = tuple(int(dim * ratio) for dim in pil_image.size)
+                        pil_image = pil_image.resize(new_size, Image.Resampling.LANCZOS)
+
+                    # Convert to base64
+                    buffer = io.BytesIO()
+                    pil_image.save(buffer, format='JPEG', quality=self.frame_quality)
+                    frame_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
+
+                    frames.append((frame_base64, current_time))
+                    extracted_count += 1
+
+                    logger.debug(f"提取第{extracted_count}帧 (时间: {current_time:.2f}s)")
+
+                    next_frame_time += time_interval
+        else:
+            # Use numpy to optimize the frame-interval calculation
+            if duration > 0:
+                frame_interval = max(1, int(duration / self.max_frames * fps))
+            else:
+                frame_interval = 30  # default interval
+
+            logger.info(f"计算得出帧间隔: {frame_interval} (将提取约{min(self.max_frames, total_frames // frame_interval + 1)}帧)")
+
+            # Use numpy to compute the target frame positions
+            target_frames = np.arange(0, min(self.max_frames, total_frames // frame_interval + 1)) * frame_interval
+            target_frames = target_frames[target_frames < total_frames].astype(int)
+
+            extracted_count = 0
+
+            for target_frame in target_frames:
+                # Seek to the target frame
+                cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
+                ret, frame = cap.read()
+                if not ret:
+                    continue
+
+                # Use numpy to optimize image processing
+                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+                # Convert to a PIL image and compute the size with numpy
+                height, width = frame_rgb.shape[:2]
+                max_dim = max(height, width)
+
+                if max_dim > self.max_image_size:
+                    # Compute the scaling ratio with numpy
+                    ratio = self.max_image_size / max_dim
+                    new_width = int(width * ratio)
+                    new_height = int(height * ratio)
+
+                    # Resize efficiently with OpenCV
+                    frame_resized = cv2.resize(frame_rgb, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)
+                    pil_image = Image.fromarray(frame_resized)
+                else:
+                    pil_image = Image.fromarray(frame_rgb)
+
+                # Convert to base64
+                buffer = io.BytesIO()
+                pil_image.save(buffer, format='JPEG', quality=self.frame_quality)
+                frame_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
+
+                # Compute the timestamp
+                timestamp = target_frame / fps if fps > 0 else 0
+                frames.append((frame_base64, timestamp))
+                extracted_count += 1
+
+                logger.debug(f"提取第{extracted_count}帧 (时间: {timestamp:.2f}s, 帧号: {target_frame})")
+
+                # Yield to the event loop once per extracted frame
+                await asyncio.sleep(0.001)

         cap.release()
         logger.info(f"✅ 成功提取{len(frames)}帧")
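To make the difference between the two extraction modes concrete, a rough worked example with hypothetical numbers (a 60-second clip at 30 FPS, `max_frames = 16`, `frame_interval_seconds = 2.0`):

    duration, fps, total_frames = 60.0, 30.0, 1800
    max_frames, frame_interval_seconds = 16, 2.0

    # fixed_number mode: evenly spaced frame indices, capped at max_frames
    frame_interval = max(1, int(duration / max_frames * fps))          # -> 112
    fixed_count = min(max_frames, total_frames // frame_interval + 1)  # -> 16

    # time_interval mode: roughly one frame every frame_interval_seconds, not capped by max_frames
    time_based_count = int(duration // frame_interval_seconds) + 1     # -> 31

    print(frame_interval, fixed_count, time_based_count)

Note that the time_interval branch in the diff does not check `max_frames`, so on long videos it can extract noticeably more frames than fixed_number mode would.
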
@@ -619,6 +619,8 @@ class VideoAnalysisConfig(ValidatedConfigBase):

     enable: bool = Field(default=True, description="启用")
     analysis_mode: str = Field(default="batch_frames", description="分析模式")
+    frame_extraction_mode: str = Field(default="fixed_number", description="抽帧模式")
+    frame_interval_seconds: float = Field(default=2.0, description="抽帧时间间隔")
     max_frames: int = Field(default=8, description="最大帧数")
     frame_quality: int = Field(default=85, description="帧质量")
     max_image_size: int = Field(default=800, description="最大图像大小")
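Assuming `ValidatedConfigBase` behaves like a pydantic-style model (an assumption; only the field declarations appear in the diff), the new options are read like any other field, with the declared defaults filling in when the config file omits them:

    # Hypothetical usage; the field names match the diff, the constructor call is illustrative
    cfg = VideoAnalysisConfig(frame_extraction_mode="time_interval", frame_interval_seconds=1.5)
    print(cfg.frame_extraction_mode, cfg.frame_interval_seconds, cfg.max_frames)
    # -> time_interval 1.5 8
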
@@ -340,16 +340,22 @@ class LLMRequest:
                     is_truncated = True
                     logger.warning("未检测到 [done] 标记,判定为截断")

-                if (is_empty_reply or is_truncated) and empty_retry_count < max_empty_retry:
-                    empty_retry_count += 1
-                    reason = "空回复" if is_empty_reply else "截断"
-                    logger.warning(f"检测到{reason},正在进行第 {empty_retry_count}/{max_empty_retry} 次重新生成")
-
-                    if empty_retry_interval > 0:
-                        await asyncio.sleep(empty_retry_interval)
-
-                    model_info, api_provider, client = self._select_model()
-                    continue
+                if is_empty_reply or is_truncated:
+                    if empty_retry_count < max_empty_retry:
+                        empty_retry_count += 1
+                        reason = "空回复" if is_empty_reply else "截断"
+                        logger.warning(f"检测到{reason},正在进行第 {empty_retry_count}/{max_empty_retry} 次重新生成")
+
+                        if empty_retry_interval > 0:
+                            await asyncio.sleep(empty_retry_interval)
+
+                        model_info, api_provider, client = self._select_model()
+                        continue
+                    else:
+                        # Maximum retry count reached, but the reply is still empty or truncated
+                        reason = "空回复" if is_empty_reply else "截断"
+                        # Raise; the outer retry logic or the final exception handler will catch it
+                        raise RuntimeError(f"经过 {max_empty_retry + 1} 次尝试后仍然是{reason}的回复")

                 # Record usage
                 if usage := response.usage:
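The restructured block above retries on an empty or truncated reply up to `max_empty_retry` times and, once the budget is exhausted, raises instead of returning a bad reply. A minimal standalone sketch of that control flow, with hypothetical names and a simplified truncation check:

    import asyncio

    async def generate_with_retry(generate, max_empty_retry: int = 3, empty_retry_interval: float = 1.0) -> str:
        """Retry on empty/truncated replies; raise once the retry budget is used up."""
        empty_retry_count = 0
        while True:
            reply = await generate()
            is_empty_reply = not reply.strip()
            is_truncated = not is_empty_reply and not reply.rstrip().endswith("[done]")
            if is_empty_reply or is_truncated:
                if empty_retry_count < max_empty_retry:
                    empty_retry_count += 1
                    if empty_retry_interval > 0:
                        await asyncio.sleep(empty_retry_interval)
                    continue
                reason = "empty" if is_empty_reply else "truncated"
                raise RuntimeError(f"reply still {reason} after {max_empty_retry + 1} attempts")
            return reply
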
@@ -418,7 +418,12 @@ class ScheduleManager:
         if is_in_time_range:
             # Check whether the bot has been woken up
             if wakeup_manager and wakeup_manager.is_in_angry_state():
-                logger.info(f"在休眠活动 '{activity}' 期间,但已被唤醒。")
+                current_timestamp = datetime.now().timestamp()
+                if current_timestamp - self.last_sleep_log_time > self.sleep_log_interval:
+                    logger.info(f"在休眠活动 '{activity}' 期间,但已被唤醒。")
+                    self.last_sleep_log_time = current_timestamp
+                else:
+                    logger.debug(f"在休眠活动 '{activity}' 期间,但已被唤醒。")
                 return False

         current_timestamp = datetime.now().timestamp()
@@ -34,6 +34,8 @@ class TTSAction(BaseAction):
     # Scenarios in which this action should be used
     action_require = [
         "当需要发送语音信息时使用",
+        "当用户要求你说话时使用",
+        "当用户要求听你声音时使用",
         "当用户明确要求使用语音功能时使用",
         "当表达内容更适合用语音而不是文字传达时使用",
         "当用户想听到语音回答而非阅读文本时使用",
@@ -381,7 +381,9 @@ enable_friend_chat = false # whether to enable friend chat
 [video_analysis] # Video analysis configuration
 enable = true # whether to enable video analysis
 analysis_mode = "batch_frames" # analysis mode: "frame_by_frame" (per-frame analysis, very slow - "not recommended when frames > 8" - but detailed), "batch_frames" (batch analysis, fast but possibly a bit simpler - in practice the results are about the same), or "auto" (choose automatically)
-max_frames = 16 # maximum number of frames to analyze
+frame_extraction_mode = "fixed_number" # frame-extraction mode: "fixed_number" (fixed total frame count) or "time_interval" (by time interval)
+frame_interval_seconds = 2.0 # seconds between extracted frames (only effective when mode = "time_interval")
+max_frames = 16 # maximum number of frames to analyze (only effective when mode = "fixed_number")
 frame_quality = 80 # JPEG quality of frame images (1-100)
 max_image_size = 800 # maximum image size per frame (pixels)
 enable_frame_timing = true # whether to include per-frame timing information in the analysis