from VideoSplitter import *
import os
import cv2
from PIL import Image
import matplotlib.pyplot as plt


def get_pil_images_from_frame_numbers(video_path, frame_numbers):
    """
    Read the specified frame numbers from a video and convert them to PIL images.

    Args:
        video_path: path to the video file
        frame_numbers: list of frame numbers to read

    Returns:
        pil_images: list of PIL images
    """
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    pil_images = []

    print(f"Reading {len(frame_numbers)} specified frames and converting them to PIL images...")

    for i, frame_number in enumerate(frame_numbers):
        # Seek to the requested frame
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        ret, frame = cap.read()
        if ret:
            # Convert OpenCV's BGR frame to RGB
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Convert to a PIL image
            pil_image = Image.fromarray(frame_rgb)
            pil_images.append(pil_image)
            print(f"Read and converted frame {frame_number} ({i+1}/{len(frame_numbers)})")
        else:
            print(f"Failed to read frame {frame_number}")

    cap.release()
    print(f"Done! Obtained {len(pil_images)} PIL images")
    return pil_images


video_path = "/root/autodl-tmp/hot_video_analyse/source/sample_demo_1.mp4"
frames_dir = "/root/autodl-tmp/hot_video_analyse/source/Splitter/hot_video_analyse/sample_demo_1/frames"
output_dir = "/root/autodl-tmp/hot_video_analyse/source/pil_images"
sample_rate = 1
method = "ssim"
threshold = 0.8

# Step 1: extract frames
print("\nStep 1: extracting video frames...")
frames_info = extract_frames(video_path, frames_dir, sample_rate)

# Step 2: detect scene changes
print("\nStep 2: detecting scene changes...")
scenes, scene_start_frames = detect_scene_changes(frames_info, method, threshold)
print(f"Scene start frame numbers: {scene_start_frames}")

# Step 3: convert the scene start frames to PIL images
print("\nStep 3: converting scene start frames to PIL images...")
pil_images = get_pil_images_from_frame_numbers(video_path, scene_start_frames)

# Show the results
print(f"\nSuccessfully obtained {len(pil_images)} PIL images")

# Inspect and save the first PIL image
if pil_images:
    first_pil = pil_images[0]
    os.makedirs(output_dir, exist_ok=True)  # make sure the output directory exists
    pil_output_path = os.path.join(output_dir, f"first_scene_frame_{scene_start_frames[0]}.jpg")
    first_pil.save(pil_output_path, quality=95)
    print(f"First PIL image info:")
    print(f"  size: {first_pil.size}")
    print(f"  mode: {first_pil.mode}")

"""
TODO notes:
  video frames
  audio to text
  full video
"""
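
# Optional preview (a minimal sketch, not part of the original script): use the
# already-imported matplotlib to display the first few scene-start frames.
# The choice of showing at most 4 frames is an arbitrary assumption.
if pil_images:
    preview_count = min(4, len(pil_images))
    fig, axes = plt.subplots(1, preview_count, figsize=(4 * preview_count, 4))
    if preview_count == 1:
        axes = [axes]  # keep the loop uniform when only one subplot is returned
    for ax, img, frame_no in zip(axes, pil_images[:preview_count], scene_start_frames[:preview_count]):
        ax.imshow(img)
        ax.set_title(f"frame {frame_no}")
        ax.axis("off")
    plt.tight_layout()
    plt.show()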