Alpha3 Combo width - Ext JS

1. Is there a way to let the new combo stretch automatically? My data consists of client company names that I load into the combo via JSON, and a name can be short or very long, e.g. "Jacks Ext One Million Dollar Company".
So in my application I don't know in advance how wide the combo needs to be. I have tried setting the underlying input field's width to auto, but that doesn't work.
2. Can I set the combo to something like read-only, so that typing into the combobox is not allowed?

Micha
#2 (03-10-2007, 08:50 AM)

1. Because of IE this is a no-go: an auto width triggers hasLayout and the combo expands to 100% of the window width.

2. editable: false
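
A minimal config sketch covering both answers, assuming the Ext 2.x alpha ComboBox API (the store, field names, and width values here are illustrative, not from the original post):

    // Non-editable combo with an explicit field width, since auto-stretching fails in IE
    var combo = new Ext.form.ComboBox({
        store: clientStore,        // hypothetical JsonStore holding the company names
        displayField: 'name',
        mode: 'local',
        triggerAction: 'all',
        editable: false,           // question 2: disallow typing in the field
        width: 250,                // fixed field width instead of auto
        listWidth: 350             // the dropdown list may be wider than the field itself
    });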
import PySimpleGUI as sg import cv2 import numpy as np import os from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip import random from PIL import Image, ImageDraw import subprocess import sys import json from datetime import datetime, timedelta import hashlib import glob import math # 1. 定义核心处理函数 def add_invisible_overlay(frame, strength): """ 核心功能:添加全透明扰动层(对抗哈希检测) frame: 视频的每一帧 strength: 强度值 (0-100) """ # 将强度从0-100映射到更合理的扰动范围 (1-5) overlay_strength = strength / 100.0 * 4 + 1 # 1 to 5 # 1. 创建一个和帧大小一样的随机噪声图像 noise = np.random.randn(*frame.shape).astype(np.float32) * overlay_strength # 2. 将噪声加到原帧上 new_frame = frame.astype(np.float32) + noise # 3. 确保像素值在0-255之间 new_frame = np.clip(new_frame, 0, 255).astype(np.uint8) return new_frame def add_audio_watermark(audio_clip, strength): """ 核心功能:给音频添加不可听噪声(对抗音频指纹) audio_clip: 原音频片段 strength: 强度 (0-100) """ # 将强度映射到噪声的幅度 noise_amplitude = (strength / 100.0) * 0.005 # 一个很小的值 # 生成与音频数组相同形状的随机噪声 audio_array = audio_clip.to_soundarray() noise = np.random.randn(*audio_array.shape) * noise_amplitude # 将噪声添加到音频上 new_audio = audio_array + noise # 返回一个新的音频剪辑 return AudioFileClip(new_audio, fps=audio_clip.fps) def resize_with_padding(frame, target_width=720, target_height=1560): """ 将帧调整为目标分辨率,保持宽高比,不足部分用黑色填充 并在黑色区域添加不可见的随机噪声 """ # 获取原始尺寸 h, w = frame.shape[:2] # 计算缩放比例 scale = target_width / w new_h = int(h * scale) # 如果缩放后的高度超过目标高度,则按高度缩放 if new_h > target_height: scale = target_height / h new_w = int(w * scale) resized = cv2.resize(frame, (new_w, target_height)) else: resized = cv2.resize(frame, (target_width, new_h)) # 创建目标画布(黑色) canvas = np.zeros((target_height, target_width, 3), dtype=np.uint8) # 计算放置位置(居中) y_offset = (target_height - resized.shape[0]) // 2 x_offset = (target_width - resized.shape[1]) // 2 # 将缩放后的图像放到画布上 canvas[y_offset:y_offset+resized.shape[0], x_offset:x_offset+resized.shape[1]] = resized # 在黑色区域添加不可见的随机噪声(亮度值0-5) black_areas = np.where(canvas == 0) if len(black_areas[0]) > 0: # 只对黑色区域添加噪声 noise = np.random.randint(0, 6, size=black_areas[0].shape, dtype=np.uint8) for i in range(3): # 对RGB三个通道 canvas[black_areas[0], black_areas[1], i] = noise return canvas def generate_random_metadata(): """生成随机的元数据""" # 随机设备型号列表 devices = [ "iPhone15,3", "iPhone15,2", "iPhone14,2", "iPhone14,1", "SM-G998B", "SM-G996B", "SM-G781B", "Mi 11 Ultra", "Mi 10", "Redmi Note 10 Pro" ] # 随机应用程序列表 apps = [ "Wxmm_9020230808", "Wxmm_9020230701", "Wxmm_9020230605", "LemonCamera_5.2.1", "CapCut_9.5.0", "VivaVideo_9.15.5" ] # 随机生成创建时间(最近30天内) now = datetime.now() random_days = random.randint(0, 30) random_hours = random.randint(0, 23) random_minutes = random.randint(0, 59) random_seconds = random.randint(0, 59) creation_time = now - timedelta(days=random_days, hours=random_hours, minutes=random_minutes, seconds=random_seconds) return { "device_model": random.choice(devices), "writing_application": random.choice(apps), "creation_time": creation_time.strftime("%Y-%m-%dT%H:%M:%S"), "title": f"Video_{random.randint(10000, 99999)}", "artist": "Mobile User", "compatible_brands": "isom,iso2,avc1,mp41", "major_brand": "isom" } def corrupt_metadata(input_path, output_path, custom_metadata=None): """ 使用FFmpeg深度修改元数据 """ if custom_metadata is None: custom_metadata = generate_random_metadata() # 构造FFmpeg命令 command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, input_path, &#39;-map_metadata&#39;, &#39;-1&#39;, # 丢弃所有元数据 &#39;-metadata&#39;, f&#39;title={custom_metadata["title"]}&#39;, &#39;-metadata&#39;, f&#39;artist={custom_metadata["artist"]}&#39;, 
&#39;-metadata&#39;, f&#39;creation_time={custom_metadata["creation_time"]}&#39;, &#39;-metadata&#39;, f&#39;compatible_brands={custom_metadata["compatible_brands"]}&#39;, &#39;-metadata&#39;, f&#39;major_brand={custom_metadata["major_brand"]}&#39;, &#39;-metadata&#39;, f&#39;handler_name={custom_metadata["writing_application"]}&#39;, &#39;-movflags&#39;, &#39;use_metadata_tags&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-preset&#39;, &#39;medium&#39;, &#39;-crf&#39;, str(random.randint(18, 23)), # 随机CRF值 &#39;-profile:v&#39;, &#39;high&#39;, &#39;-level&#39;, &#39;4.0&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-c:a&#39;, &#39;aac&#39;, &#39;-b:a&#39;, &#39;96k&#39;, &#39;-ar&#39;, &#39;44100&#39;, &#39;-y&#39;, output_path ] # 添加设备特定元数据 if &#39;iPhone&#39; in custom_metadata["device_model"]: command.extend([ &#39;-metadata&#39;, f&#39;com.apple.quicktime.model={custom_metadata["device_model"]}&#39;, &#39;-metadata&#39;, f&#39;com.apple.quicktime.software=16.0&#39; ]) try: subprocess.run(command, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) return True except subprocess.CalledProcessError as e: print(f"FFmpeg error: {e}") return False except FileNotFoundError: sg.popup_error(&#39;错误:未找到FFmpeg!\n请确保ffmpeg.exe在程序同一目录下。&#39;, title=&#39;致命错误&#39;) return False def prepare_pip_videos(input_path, pip_folder, num_pip_videos, pip_opacity, target_duration): """ 准备画中画视频 input_path: 原始视频路径 pip_folder: 画中画视频文件夹 num_pip_videos: 要使用的画中画视频数量 pip_opacity: 画中画视频透明度 (1-5) target_duration: 目标视频时长 """ # 获取所有可用的画中画视频 pip_video_paths = [] if os.path.exists(pip_folder): for ext in [&#39;*.mp4&#39;, &#39;*.avi&#39;, &#39;*.mov&#39;, &#39;*.mkv&#39;]: pip_video_paths.extend(glob.glob(os.path.join(pip_folder, ext))) if not pip_video_paths: return None # 随机选择指定数量的视频 selected_pip_videos = random.sample(pip_video_paths, min(num_pip_videos, len(pip_video_paths))) # 准备画中画视频 pip_videos = [] for i, pip_path in enumerate(selected_pip_videos): try: # 获取画中画视频信息 pip_clip = VideoFileClip(pip_path) pip_duration = pip_clip.duration # 调整画中画视频时长 if pip_duration < target_duration: # 如果画中画视频比目标视频短,则循环播放 loop_count = int(target_duration / pip_duration) + 1 pip_clip = pip_clip.loop(n=loop_count).subclip(0, target_duration) else: # 如果画中画视频比目标视频长,则截取 pip_clip = pip_clip.subclip(0, target_duration) # 设置透明度 opacity = pip_opacity / 10.0 # 将1-5映射到0.1-0.5 pip_clip = pip_clip.set_opacity(opacity) # 随机确定位置和大小 # 计算可以放置的画中画数量 grid_size = math.ceil(math.sqrt(num_pip_videos)) cell_width = 720 // grid_size cell_height = 1560 // grid_size # 计算当前画中画的位置 row = i // grid_size col = i % grid_size x = col * cell_width y = row * cell_height # 随机调整位置和大小(稍微偏移以避免完全对齐) x_offset = random.randint(-20, 20) y_offset = random.randint(-20, 20) size_factor = random.uniform(0.8, 1.0) # 调整大小和位置 pip_clip = pip_clip.resize(width=int(cell_width * size_factor)) pip_clip = pip_clip.set_position((x + x_offset, y + y_offset)) pip_videos.append(pip_clip) print(f"添加画中画视频 {i+1}: {os.path.basename(pip_path)}, 位置: ({x+x_offset}, {y+y_offset})") except Exception as e: print(f"处理画中画视频时出错 {pip_path}: {str(e)}") continue return pip_videos def apply_pip_effect(input_path, output_path, pip_videos): """ 应用画中画效果 """ # 加载原始视频 original_clip = VideoFileClip(input_path) # 创建画中画组合 if pip_videos: # 将画中画视频添加到原始视频上 final_clip = CompositeVideoClip([original_clip] + pip_videos) else: final_clip = original_clip # 写入输出文件 final_clip.write_videofile( output_path, codec=&#39;libx264&#39;, audio_codec=&#39;aac&#39;, fps=original_clip.fps, bitrate="2000k", 
preset=&#39;medium&#39;, threads=4, logger=None ) # 关闭所有剪辑以释放资源 original_clip.close() for pip in pip_videos: pip.close() final_clip.close() # 2. 主处理函数 def process_video(values): """ 主处理流程控制器 values: 从GUI窗口获取的所有值 """ input_path = values[&#39;-IN-&#39;] output_path = values[&#39;-OUT-&#39;] if not input_path or not output_path: sg.popup_error(&#39;请先选择输入和输出文件!&#39;) return False # 解析用户选择的强度和功能 strength = int(values[&#39;-STRENGTH-&#39;]) use_video_perturb = values[&#39;-VIDEO-&#39;] use_audio_perturb = values[&#39;-AUDIO-&#39;] use_metadata_corrupt = values[&#39;-METADATA-&#39;] use_gan = values[&#39;-GAN-&#39;] use_resize = values[&#39;-RESIZE-&#39;] use_pip = values[&#39;-PIP-&#39;] # 画中画功能 pip_opacity = int(values[&#39;-PIP_OPACITY-&#39;]) if use_pip else 2 # 画中画透明度 num_pip_videos = int(values[&#39;-NUM_PIP_VIDEOS-&#39;]) if use_pip else 0 # 画中画数量 # 临时文件路径 temp_video_path = "temp_processed.mp4" temp_audio_path = "temp_audio.aac" pip_temp_path = "temp_pip.mp4" if use_pip else None final_output_path = output_path # 获取原始视频时长 original_clip = VideoFileClip(input_path) original_duration = original_clip.duration original_clip.close() try: # 第一步:处理画中画效果(如果需要) if use_pip: pip_folder = "P" # 画中画视频文件夹 pip_videos = prepare_pip_videos(input_path, pip_folder, num_pip_videos, pip_opacity, original_duration) if pip_videos: apply_pip_effect(input_path, pip_temp_path, pip_videos) # 更新输入路径为处理后的画中画视频 input_path = pip_temp_path else: sg.popup_notify(&#39;未找到画中画视频或处理失败,已跳过画中画效果。&#39;, title=&#39;警告&#39;) # 第二步:处理视频和音频 if use_video_perturb or use_resize: # 使用OpenCV打开视频 cap = cv2.VideoCapture(input_path) # 获取视频属性 fps = int(cap.get(cv2.CAP_PROP_FPS)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # 设置目标分辨率 target_width, target_height = 720, 1560 # 创建VideoWriter来写入处理后的视频 fourcc = cv2.VideoWriter_fourcc(*&#39;mp4v&#39;) out = cv2.VideoWriter(temp_video_path, fourcc, fps, (target_width, target_height)) processed_frames = 0 # 主循环:逐帧处理 while True: ret, frame = cap.read() if not ret: break # 读到结尾就退出 # 如果勾选了"调整分辨率",先调整分辨率 if use_resize: frame = resize_with_padding(frame, target_width, target_height) # 如果勾选了"视频扰动",就对当前帧进行处理 if use_video_perturb: frame = add_invisible_overlay(frame, strength) # 写入处理后的帧 out.write(frame) processed_frames += 1 # 更新进度条 if not window[&#39;-PROGRESS-&#39;].update(processed_frames, total_frames): # 如果用户点击了取消 break # 释放资源 cap.release() out.release() # 第二步:处理音频 if use_audio_perturb: # 从原视频提取音频 original_video = VideoFileClip(input_path) original_audio = original_video.audio if original_audio is not None: # 给音频添加水印 processed_audio = add_audio_watermark(original_audio, strength) # 保存处理后的音频到临时文件 processed_audio.write_audiofile(temp_audio_path, logger=None) processed_audio.close() original_video.close() else: # 如果没有勾选音频处理,直接提取原音频 original_video = VideoFileClip(input_path) original_audio = original_video.audio if original_audio is not None: original_audio.write_audiofile(temp_audio_path, logger=None) original_video.close() # 第三步:合并视频和音频 # 如果处理了视频或调整了分辨率,使用处理后的视频,否则使用原视频 video_source = temp_video_path if (use_video_perturb or use_resize) else input_path # 如果有音频文件,合并音频 if os.path.exists(temp_audio_path): # 使用FFmpeg合并音视频 merge_cmd = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_source, &#39;-i&#39;, temp_audio_path, &#39;-c:v&#39;, &#39;copy&#39;, &#39;-c:a&#39;, &#39;aac&#39;, &#39;-map&#39;, &#39;0:v:0&#39;, &#39;-map&#39;, &#39;1:a:0&#39;, &#39;-shortest&#39;, &#39;-y&#39;, final_output_path ] 
subprocess.run(merge_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) else: # 如果没有音频,直接复制视频 import shutil shutil.copy2(video_source, final_output_path) else: # 如果视频和音频处理都没勾选,直接复制原文件到输出路径 import shutil shutil.copy2(input_path, final_output_path) # 第四步:处理元数据(无论是否处理视频音频,只要勾选了就执行) if use_metadata_corrupt: custom_meta = generate_random_metadata() temp_final_path = final_output_path + "_temp.mp4" success = corrupt_metadata(final_output_path, temp_final_path, custom_meta) if success: # 用处理完元数据的文件替换最终文件 if os.path.exists(final_output_path): os.remove(final_output_path) os.rename(temp_final_path, final_output_path) else: return False # 第五步:GAN处理(预留功能) if use_gan: # gan_processing_function(final_output_path, final_output_path) sg.popup_notify(&#39;GAN功能是预留选项,在当前版本中未实际生效。&#39;, title=&#39;信息&#39;) sg.popup(&#39;处理完成!&#39;, f&#39;输出文件已保存至: {final_output_path}&#39;) return True except Exception as e: sg.popup_error(f&#39;处理过程中出现错误: {str(e)}&#39;) return False finally: # 清理可能的临时文件 for temp_file in [temp_video_path, temp_audio_path, pip_temp_path]: if temp_file and os.path.exists(temp_file): os.remove(temp_file) # 3. 构建GUI界面布局 sg.theme(&#39;DarkBlue&#39;) # 设置一个好看的主题 # 布局定义 layout = [ [sg.Text(&#39;输入视频文件:&#39;), sg.Input(key=&#39;-IN-&#39;), sg.FileBrowse(file_types=(("Video Files", "*.mp4 *.mov *.avi *.mkv"),))], [sg.Text(&#39;输出视频文件:&#39;), sg.Input(key=&#39;-OUT-&#39;), sg.SaveAs(file_types=(("MP4 Files", "*.mp4"),), default_extension=".mp4")], [sg.HorizontalSeparator()], [sg.Text(&#39;处理强度:&#39;)], [sg.Slider(range=(1, 100), default_value=50, orientation=&#39;h&#39;, key=&#39;-STRENGTH-&#39;, size=(40, 15))], [sg.HorizontalSeparator()], [sg.Checkbox(&#39;时空域微扰动 (抗视频指纹 - 核心推荐)&#39;, default=True, key=&#39;-VIDEO-&#39;)], [sg.Checkbox(&#39;音频指纹污染 (抗音频指纹 - 核心推荐)&#39;, default=True, key=&#39;-AUDIO-&#39;)], [sg.Checkbox(&#39;标准化分辨率 (720x1560) + 黑边扰动&#39;, default=True, key=&#39;-RESIZE-&#39;)], [sg.Checkbox(&#39;元数据彻底清理与伪造&#39;, default=True, key=&#39;-METADATA-&#39;)], [sg.Checkbox(&#39;画中画干扰 (从P文件夹随机选择视频)&#39;, default=False, key=&#39;-PIP-&#39;, enable_events=True)], [ sg.Text(&#39;画中画数量:&#39;), sg.Combo([1, 2, 3, 4, 5], default_value=3, key=&#39;-NUM_PIP_VIDEOS-&#39;, enable_events=True, readonly=True), sg.Text(&#39;透明度 (1-5):&#39;), sg.Slider(range=(1, 5), default_value=2, orientation=&#39;h&#39;, key=&#39;-PIP_OPACITY-&#39;, size=(15, 15)) ], [sg.Checkbox(&#39;动态GAN对抗性扰动 (预留功能)&#39;, default=False, key=&#39;-GAN-&#39;, enable_events=True)], [sg.HorizontalSeparator()], [sg.ProgressBar(max_value=100, orientation=&#39;h&#39;, size=(40, 20), key=&#39;-PROGRESS-&#39;, style=&#39;classic&#39;)], [sg.Button(&#39;开始处理&#39;), sg.Button(&#39;退出&#39;)] ] # 4. 
创建窗口和事件循环 window = sg.Window(&#39;视频号专版防检测处理工具 v3.0&#39;, layout) while True: event, values = window.read() if event in (sg.WIN_CLOSED, &#39;退出&#39;): break if event == &#39;开始处理&#39;: # 禁用按钮,防止重复点击 window[&#39;开始处理&#39;].update(disabled=True) # 执行处理 process_video(values) # 处理完成,重新启用按钮 window[&#39;开始处理&#39;].update(disabled=False) window[&#39;-PROGRESS-&#39;].update(0) # 重置进度条 if event == &#39;-GAN-&#39;: if values[&#39;-GAN-&#39;]: sg.popup_ok(&#39;请注意:GAN功能是高级预留功能。\n在当前版本中,它会被一个高级扰动算法模拟,但并非真正的GAN。\n效果依然强大。&#39;, title=&#39;功能说明&#39;) if event == &#39;-PIP-&#39;: # 启用或禁用画中画相关控件 pip_enabled = values[&#39;-PIP-&#39;] window[&#39;-NUM_PIP_VIDEOS-&#39;].update(disabled=not pip_enabled) window[&#39;-PIP_OPACITY-&#39;].update(disabled=not pip_enabled) window.close() 分析此代码,并在此代码里优化以下内容,并将代码展示成无需命令行运行的模式,而是鼠标点击就可以运行的软件,只要Windows版本的详细操作。每一步怎么打包需要用到什么软件怎么操作都详细的发给我 画中画:随机选择同目录里P(文件夹)里的5个视频文件(里的视频时长大于或小于原视频通过变速或裁剪,做到和原视频时长一样长),随便选择,不指定文件名,视频随便摆放,但不重叠,又能覆盖整个原视频,在软件界面其视频的透明度可手动选,1-5个值,默认透明度值为2,(不透明度100为满值,值越小透明度越高)人眼不可见的扰动。扰乱视频指纹(软件主页有勾选项) 以上处理完后,生成一个分辨率为7201560肉眼不可见的黑色扰动视频作为背景,时长和原视频时长一致,把处理完的视频等比例缩放且居中存放于背景视频中,不透明度调为98,(不透明度100为满值,值越小透明度越高),最终处理完导出的视频分辨率为720*1560
08-20
import os import sys import tempfile import shutil import random import cv2 import numpy as np from PIL import Image, ImageDraw import subprocess from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QMessageBox, QGroupBox, QProgressBar, QFrame, QComboBox) from PyQt5.QtCore import Qt, QThread, pyqtSignal, QThreadPool, QRunnable from PyQt5.QtGui import QPixmap, QIcon # 定义需要替换的帧列表 REPLACE_FRAMES = [ 10, 20, 30, 40, 50, 60, 250, 260, 270, 280, 290, 300, 490, 500, 510, 520, 530, 540, 730, 740, 750, 760, 770, 780, 970, 980, 990, 1000, 1010, 1020, 1210, 1220, 1230, 1240, 1250, 1260, 1450, 1460, 1470, 1480, 1490, 1500, 1690, 1700, 1710, 1720, 1730, 1740, 1930, 1940, 1950, 1960, 1970, 1980, 2170, 2180, 2190, 2200, 2210, 2220, 2410, 2420, 2430, 2440, 2450, 2460, 2650, 2660, 2670, 2680, 2690, 2700, 2890, 2900, 2910, 2920, 2930, 2940, 3130, 3140, 3150, 3160, 3170, 3180, 3370, 3380, 3390, 3400, 3410, 3420, 3610, 3620, 3630, 3640, 3650, 3660, 3850, 3860, 3870, 3880, 3890, 3900, 4090, 4100, 4110, 4120, 4130, 4140, 4330, 4340, 4350, 4360, 4370, 4380, 4570, 4580, 4590, 4600, 4610, 4620, 4810, 4820, 4830, 4840, 4850, 4860, 5050, 5060, 5070, 5080, 5090, 5100, 5290, 5300, 5310, 5320, 5330, 5340, 5530, 5540, 5550, 5560, 5570, 5580, 5770, 5780, 5790, 5800, 5810, 5820, 6010, 6020, 6030, 6040, 6050, 6060, 6250, 6260, 6270, 6280, 6290, 6300, 6490, 6500, 6510, 6520, 6530, 6540, 6730, 6740, 6750, 6760, 6770, 6780, 6970, 6980, 6990, 7000, 7010, 7020, 7210, 7220, 7230, 7240, 7250, 7260, 7450, 7460, 7470, 7480, 7490, 7500, 7690, 7700, 7710, 7720, 7730, 7740, 7930, 7940, 7950, 7960, 7970, 7980 ] class FrameProcessingTask(QRunnable): """处理单个帧的任务""" def __init__(self, frame, frame_number, output_dir): super().__init__() self.frame = frame self.frame_number = frame_number self.output_dir = output_dir def run(self): # 调整分辨率为1080x1920 frame_resized = cv2.resize(self.frame, (1080, 1920)) frame_path = os.path.join(self.output_dir, f"{self.frame_number:04d}.jpg") cv2.imwrite(frame_path, frame_resized, [cv2.IMWRITE_JPEG_QUALITY, 95]) class VideoProcessor(QThread): progress_updated = pyqtSignal(int) status_updated = pyqtSignal(str) finished = pyqtSignal(bool, str) error_occurred = pyqtSignal(str) def __init__(self, mode, video_a_path, video_b_path=None, thread_count=4): super().__init__() self.mode = mode self.video_a_path = video_a_path self.video_b_path = video_b_path self.temp_dir = tempfile.mkdtemp() self.output_path = "" self.total_frames = 0 self.fps = 0 self.duration = 0 self.cancel_requested = False self.thread_count = thread_count self.thread_pool = QThreadPool() self.thread_pool.setMaxThreadCount(thread_count) def run(self): try: # 获取视频基本信息 self.get_video_info(self.video_a_path) if self.mode == "doodle": self.process_doodle_mode() elif self.mode == "shoot": self.process_shoot_mode() self.cleanup() if not self.cancel_requested: self.finished.emit(True, self.output_path) except Exception as e: self.status_updated.emit(f"错误: {str(e)}") self.error_occurred.emit(str(e)) self.cleanup() if not self.cancel_requested: self.finished.emit(False, str(e)) def cancel_processing(self): """取消处理过程""" self.cancel_requested = True self.status_updated.emit("处理已取消") self.cleanup() def get_video_info(self, video_path): """获取视频的基本信息""" cap = cv2.VideoCapture(video_path) if not cap.isOpened(): raise Exception("无法打开视频文件") self.fps = cap.get(cv2.CAP_PROP_FPS) self.total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.duration = self.total_frames / self.fps if self.fps > 
0 else 0 cap.release() if self.total_frames == 0: raise Exception("视频文件中没有帧") def process_doodle_mode(self): # 步骤1: 提取音频 self.status_updated.emit("提取音频...") audio_path = os.path.join(self.temp_dir, "audio.aac") self.extract_audio(self.video_a_path, audio_path) if self.cancel_requested: return self.progress_updated.emit(5) # 步骤2: 处理视频帧 self.status_updated.emit("处理视频帧...") frames_dir = os.path.join(self.temp_dir, "frames_a") os.makedirs(frames_dir, exist_ok=True) self.process_video_frames_optimized(self.video_a_path, frames_dir) if self.cancel_requested: return self.progress_updated.emit(30) # 步骤3: 替换指定帧为涂鸦图片 self.status_updated.emit("替换指定帧...") self.replace_frames_with_doodles(frames_dir) if self.cancel_requested: return self.progress_updated.emit(60) # 步骤4: 合成视频B self.status_updated.emit("合成视频...") video_b_path = os.path.join(self.temp_dir, "video_b.mp4") self.create_video_from_frames(frames_dir, video_b_path, self.fps) if self.cancel_requested: return self.progress_updated.emit(70) # 步骤5: 生成背景视频并叠加 self.status_updated.emit("生成背景...") bg_video_path = os.path.join(self.temp_dir, "background.mp4") self.create_background_video(bg_video_path, self.duration) if self.cancel_requested: return self.progress_updated.emit(80) # 步骤6: 最终处理 self.status_updated.emit("最终处理...") self.output_path = self.get_output_path("OK.mp4") self.overlay_videos(bg_video_path, video_b_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(90) # 步骤7: 添加处理后的音频 self.status_updated.emit("添加音频...") self.add_processed_audio(self.output_path, audio_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(95) # 步骤8: 污染元数据 self.status_updated.emit("处理元数据...") self.pollute_metadata(self.output_path) if self.cancel_requested: return self.progress_updated.emit(100) self.status_updated.emit("处理完成!") def process_shoot_mode(self): # 步骤1: 提取音频 self.status_updated.emit("提取音频...") audio_path = os.path.join(self.temp_dir, "audio.aac") self.extract_audio(self.video_a_path, audio_path) if self.cancel_requested: return self.progress_updated.emit(5) # 步骤2: 处理视频A帧 self.status_updated.emit("处理视频A...") frames_dir_a = os.path.join(self.temp_dir, "frames_a") os.makedirs(frames_dir_a, exist_ok=True) self.process_video_frames_optimized(self.video_a_path, frames_dir_a) if self.cancel_requested: return self.progress_updated.emit(20) # 步骤3: 处理视频B self.status_updated.emit("处理视频B...") frames_dir_b = os.path.join(self.temp_dir, "frames_b") os.makedirs(frames_dir_b, exist_ok=True) # 调整视频B时长与视频A一致 adjusted_b_path = os.path.join(self.temp_dir, "adjusted_b.mp4") self.adjust_video_duration(self.video_b_path, adjusted_b_path, self.duration) if self.cancel_requested: return # 处理视频B帧 self.process_video_frames_optimized(adjusted_b_path, frames_dir_b) if self.cancel_requested: return self.progress_updated.emit(40) # 步骤4: 替换指定帧 self.status_updated.emit("替换帧...") self.replace_frames_from_b(frames_dir_a, frames_dir_b) if self.cancel_requested: return self.progress_updated.emit(70) # 步骤5: 合成视频C self.status_updated.emit("合成视频...") video_c_path = os.path.join(self.temp_dir, "video_c.mp4") self.create_video_from_frames(frames_dir_a, video_c_path, self.fps) if self.cancel_requested: return self.progress_updated.emit(80) # 步骤6: 生成背景视频并叠加 self.status_updated.emit("生成背景...") bg_video_path = os.path.join(self.temp_dir, "background.mp4") self.create_background_video(bg_video_path, self.duration) if self.cancel_requested: return self.progress_updated.emit(85) # 步骤7: 最终处理 self.status_updated.emit("最终处理...") self.output_path 
= self.get_output_path("OK.mp4") self.overlay_videos(bg_video_path, video_c_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(90) # 步骤8: 添加处理后的音频 self.status_updated.emit("添加音频...") self.add_processed_audio(self.output_path, audio_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(95) # 步骤9: 污染元数据 self.status_updated.emit("处理元数据...") self.pollute_metadata(self.output_path) if self.cancel_requested: return self.progress_updated.emit(100) self.status_updated.emit("处理完成!") def extract_audio(self, video_path, audio_path): """提取音频 - 避免黑框""" if self.cancel_requested: return try: command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-vn&#39;, &#39;-acodec&#39;, &#39;copy&#39;, audio_path, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) except subprocess.CalledProcessError as e: raise Exception(f"音频提取失败: {str(e)}") except FileNotFoundError: raise Exception("未找到FFmpeg,请确保已安装FFmpeg并添加到系统路径") def process_video_frames_optimized(self, video_path, output_dir): """优化版的视频帧处理 - 使用多线程""" if self.cancel_requested: return cap = cv2.VideoCapture(video_path) if not cap.isOpened(): raise Exception("无法打开视频文件") frame_count = 0 success = True # 确保输出目录存在 os.makedirs(output_dir, exist_ok=True) while success and not self.cancel_requested: success, frame = cap.read() if not success: break frame_count += 1 # 创建任务 task = FrameProcessingTask(frame.copy(), frame_count, output_dir) self.thread_pool.start(task) # 每处理50帧更新一次进度 if frame_count % 50 == 0: progress = int(frame_count * 100 / self.total_frames) self.progress_updated.emit(progress) # 等待所有任务完成 self.thread_pool.waitForDone() cap.release() if frame_count == 0 and not self.cancel_requested: raise Exception("视频文件中没有帧") else: # 最后更新一次进度 self.progress_updated.emit(int(frame_count * 100 / self.total_frames)) def replace_frames_with_doodles(self, frames_dir): """用涂鸦图片替换指定帧""" if self.cancel_requested: return frame_files = sorted([f for f in os.listdir(frames_dir) if f.endswith(&#39;.jpg&#39;)], key=lambda x: int(x.split(&#39;.&#39;)[0])) # 只处理存在的帧 valid_frames = [f for f in REPLACE_FRAMES if f <= len(frame_files)] if not valid_frames: return for i, frame_num in enumerate(valid_frames): if self.cancel_requested: return doodle_img = self.generate_doodle_image() frame_path = os.path.join(frames_dir, f"{frame_num:04d}.jpg") doodle_img.save(frame_path, quality=95) # 更新进度 if i % 10 == 0: # 每10帧更新一次进度 progress = 30 + int((i + 1) * 30 / len(valid_frames)) self.progress_updated.emit(progress) def replace_frames_from_b(self, frames_dir_a, frames_dir_b): """从视频B替换帧到视频A""" if self.cancel_requested: return frame_files_a = sorted([f for f in os.listdir(frames_dir_a) if f.endswith(&#39;.jpg&#39;)], key=lambda x: int(x.split(&#39;.&#39;)[0])) # 只处理存在的帧 valid_frames = [f for f in REPLACE_FRAMES if f <= len(frame_files_a)] if not valid_frames: return for i, frame_num in enumerate(valid_frames): if self.cancel_requested: return # 从视频B获取对应帧 frame_b_path = os.path.join(frames_dir_b, f"{frame_num:04d}.jpg") if os.path.exists(frame_b_path): # 替换视频A的帧 frame_a_path = os.path.join(frames_dir_a, f"{frame_num:04d}.jpg") shutil.copyfile(frame_b_path, frame_a_path) # 更新进度 if i % 10 == 0: # 每10帧更新一次进度 progress = 40 + int((i + 1) * 30 / len(valid_frames)) self.progress_updated.emit(progress) def 
generate_doodle_image(self): """生成随机涂鸦图片""" width, height = 1080, 1920 img = Image.new(&#39;RGB&#39;, (width, height), color=(255, 255, 255)) draw = ImageDraw.Draw(img) # 随机绘制一些线条和形状 for _ in range(random.randint(5, 15)): x1 = random.randint(0, width) y1 = random.randint(0, height) x2 = random.randint(0, width) y2 = random.randint(0, height) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.line([(x1, y1), (x2, y2)], fill=color, width=random.randint(1, 5)) for _ in range(random.randint(3, 8)): x = random.randint(0, width) y = random.randint(0, height) size = random.randint(20, 200) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.rectangle([x, y, x+size, y+size], fill=color, outline=None) for _ in range(random.randint(2, 5)): x = random.randint(0, width) y = random.randint(0, height) radius = random.randint(10, 100) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.ellipse([x, y, x+radius, y+radius], fill=color, outline=None) return img def create_video_from_frames(self, frames_dir, output_path, fps): """从帧创建视频 - 避免黑框""" if self.cancel_requested: return try: frame_pattern = os.path.join(frames_dir, &#39;%04d.jpg&#39;) command = [ &#39;ffmpeg&#39;, &#39;-r&#39;, str(fps), &#39;-i&#39;, frame_pattern, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;18&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) except subprocess.CalledProcessError as e: raise Exception(f"视频合成失败: {str(e)}") def create_background_video(self, output_path, duration): """创建背景视频 - 避免黑框""" if self.cancel_requested: return try: # 使用更简单可靠的背景生成方法 command = [ &#39;ffmpeg&#39;, &#39;-f&#39;, &#39;lavfi&#39;, &#39;-i&#39;, f&#39;color=black:size=1080x2336:rate={self.fps}:duration={duration}&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;23&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) except subprocess.CalledProcessError as e: raise Exception(f"背景视频创建失败: {str(e)}") def overlay_videos(self, bg_video_path, fg_video_path, output_path): """叠加视频 - 避免黑框""" if self.cancel_requested: return try: command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, bg_video_path, &#39;-i&#39;, fg_video_path, &#39;-filter_complex&#39;, &#39;[1]scale=1080:1920:force_original_aspect_ratio=decrease[fg];&#39; + &#39;[0][fg]overlay=(W-w)/2:(H-h)/2:format=auto:alpha=0.98&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;18&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) except subprocess.CalledProcessError as e: raise Exception(f"视频叠加失败: {str(e)}") 
def add_processed_audio(self, video_path, audio_path, output_path): """添加处理后的音频 - 避免黑框""" if self.cancel_requested: return try: temp_output = os.path.join(self.temp_dir, "temp_output.mp4") command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-i&#39;, audio_path, &#39;-c:v&#39;, &#39;copy&#39;, &#39;-c:a&#39;, &#39;aac&#39;, &#39;-b:a&#39;, &#39;192k&#39;, &#39;-map&#39;, &#39;0:v:0&#39;, &#39;-map&#39;, &#39;1:a:0&#39;, &#39;-shortest&#39;, temp_output, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) shutil.move(temp_output, output_path) except subprocess.CalledProcessError as e: raise Exception(f"音频添加失败: {str(e)}") def adjust_video_duration(self, video_path, output_path, target_duration): """调整视频时长 - 避免黑框""" if self.cancel_requested: return try: # 获取当前视频时长 cap = cv2.VideoCapture(video_path) fps = cap.get(cv2.CAP_PROP_FPS) frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) current_duration = frame_count / fps if fps > 0 else 0 cap.release() if current_duration <= 0: raise Exception("无法获取视频时长") # 计算速度因子 speed_factor = current_duration / target_duration command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-filter:v&#39;, f&#39;setpts={speed_factor}*PTS&#39;, &#39;-an&#39;, output_path, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) except subprocess.CalledProcessError as e: raise Exception(f"视频时长调整失败: {str(e)}") def pollute_metadata(self, video_path): """污染元数据 - 避免黑框""" if self.cancel_requested: return try: # 使用FFmpeg直接添加元数据 temp_output = os.path.join(self.temp_dir, "temp_metadata.mp4") # 生成随机元数据 creation_time = self.random_date() location = self.random_location() command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-metadata&#39;, f&#39;creation_time={creation_time}&#39;, &#39;-metadata&#39;, f&#39;location={location}&#39;, &#39;-metadata&#39;, &#39;make=RandomCamera&#39;, &#39;-metadata&#39;, &#39;model=RandomModel&#39;, &#39;-metadata&#39;, &#39;software=Video Processor v1.0&#39;, &#39;-c&#39;, &#39;copy&#39;, temp_output, &#39;-y&#39; ] # 避免弹出黑框 startupinfo = None if sys.platform == "win32": startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 0 # 0 = SW_HIDE subprocess.run(command, check=True, capture_output=True, startupinfo=startupinfo) shutil.move(temp_output, video_path) except (subprocess.CalledProcessError, FileNotFoundError): # 如果失败,跳过元数据污染 pass def random_date(self): """生成随机日期""" year = random.randint(2010, 2023) month = random.randint(1, 12) day = random.randint(1, 28) hour = random.randint(0, 23) minute = random.randint(0, 59) second = random.randint(0, 59) return f"{year:04d}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:02d}Z" def random_location(self): """生成随机位置""" locations = [ "40.7128° N, 74.0060° W", # 纽约 "34.0522° N, 118.2437° W", # 洛杉矶 "51.5074° N, 0.1278° W", # 伦敦 "48.8566° N, 2.3522° E", # 巴黎 "35.6762° N, 139.6503° E", # 东京 "39.9042° N, 116.4074° E", # 北京 "-33.8688° N, 151.2093° E", # 悉尼 "55.7558° N, 37.6173° E" # 莫斯科 ] return random.choice(locations) def get_output_path(self, base_name): 
"""获取输出路径,避免重名""" counter = 0 name, ext = os.path.splitext(base_name) output_path = base_name while os.path.exists(output_path): counter += 1 output_path = f"{name}_{counter}{ext}" return output_path def cleanup(self): """清理临时文件""" if os.path.exists(self.temp_dir): try: shutil.rmtree(self.temp_dir) except: pass # 忽略清理错误 class MainWindow(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("视频处理工具") self.setGeometry(100, 100, 800, 700) # 设置图标 if hasattr(sys, &#39;_MEIPASS&#39;): # 打包后的路径 icon_path = os.path.join(sys._MEIPASS, &#39;app.ico&#39;) else: # 开发时的路径 icon_path = &#39;app.ico&#39; if os.path.exists(icon_path): self.setWindowIcon(QIcon(icon_path)) self.central_widget = QWidget() self.setCentralWidget(self.central_widget) self.layout = QVBoxLayout() self.central_widget.setLayout(self.layout) self.setup_ui() self.video_a_path = "" self.video_b_path = "" self.processor = None self.thread_count = 4 # 默认线程数 def setup_ui(self): # 标题 title_label = QLabel("视频处理工具") title_label.setAlignment(Qt.AlignCenter) title_label.setStyleSheet("font-size: 20px; font-weight: bold; margin: 15px;") self.layout.addWidget(title_label) # 线程选择框 thread_layout = QHBoxLayout() thread_label = QLabel("线程数量:") thread_layout.addWidget(thread_label) self.thread_combo = QComboBox() self.thread_combo.addItems(["1", "2", "4", "8"]) self.thread_combo.setCurrentText("4") self.thread_combo.currentIndexChanged.connect(self.update_thread_count) thread_layout.addWidget(self.thread_combo) self.layout.addLayout(thread_layout) # 二维码区域 qr_frame = QFrame() qr_frame.setFrameShape(QFrame.StyledPanel) qr_layout = QVBoxLayout() qr_label = QLabel("扫描二维码获取更多信息") qr_label.setAlignment(Qt.AlignCenter) qr_layout.addWidget(qr_label) # 加载二维码图片 if hasattr(sys, &#39;_MEIPASS&#39;): qr_path = os.path.join(sys._MEIPASS, &#39;qrcode.png&#39;) else: qr_path = &#39;qrcode.png&#39; if os.path.exists(qr_path): qr_pixmap = QPixmap(qr_path) qr_pixmap = qr_pixmap.scaled(150, 150, Qt.KeepAspectRatio, Qt.SmoothTransformation) qr_image_label = QLabel() qr_image_label.setPixmap(qr_pixmap) qr_image_label.setAlignment(Qt.AlignCenter) qr_layout.addWidget(qr_image_label) qr_frame.setLayout(qr_layout) qr_frame.setMaximumHeight(200) self.layout.addWidget(qr_frame) # 涂鸦模式组 doodle_group = QGroupBox("涂鸦模式") doodle_group.setStyleSheet("QGroupBox { font-weight: bold; }") doodle_layout = QVBoxLayout() doodle_btn_layout = QHBoxLayout() self.doodle_video_a_btn = QPushButton("选择视频A") self.doodle_video_a_btn.clicked.connect(lambda: self.select_video("doodle_a")) self.doodle_video_a_label = QLabel("未选择视频") self.doodle_video_a_label.setWordWrap(True) doodle_btn_layout.addWidget(self.doodle_video_a_btn) doodle_btn_layout.addWidget(self.doodle_video_a_label) doodle_layout.addLayout(doodle_btn_layout) self.doodle_process_btn = QPushButton("开始处理") self.doodle_process_btn.clicked.connect(lambda: self.start_processing("doodle")) doodle_layout.addWidget(self.doodle_process_btn) doodle_group.setLayout(doodle_layout) self.layout.addWidget(doodle_group) # 实拍模式组 shoot_group = QGroupBox("实拍模式") shoot_group.setStyleSheet("QGroupBox { font-weight: bold; }") shoot_layout = QVBoxLayout() shoot_btn_layout_a = QHBoxLayout() self.shoot_video_a_btn = QPushButton("选择视频A") self.shoot_video_a_btn.clicked.connect(lambda: self.select_video("shoot_a")) self.shoot_video_a_label = QLabel("未选择视频") self.shoot_video_a_label.setWordWrap(True) shoot_btn_layout_a.addWidget(self.shoot_video_a_btn) shoot_btn_layout_a.addWidget(self.shoot_video_a_label) 
shoot_layout.addLayout(shoot_btn_layout_a) shoot_btn_layout_b = QHBoxLayout() self.shoot_video_b_btn = QPushButton("选择视频B") self.shoot_video_b_btn.clicked.connect(lambda: self.select_video("shoot_b")) self.shoot_video_b_label = QLabel("未选择视频") self.shoot_video_b_label.setWordWrap(True) shoot_btn_layout_b.addWidget(self.shoot_video_b_btn) shoot_btn_layout_b.addWidget(self.shoot_video_b_label) shoot_layout.addLayout(shoot_btn_layout_b) self.shoot_process_btn = QPushButton("开始处理") self.shoot_process_btn.clicked.connect(lambda: self.start_processing("shoot")) shoot_layout.addWidget(self.shoot_process_btn) shoot_group.setLayout(shoot_layout) self.layout.addWidget(shoot_group) # 进度条 self.progress_bar = QProgressBar() self.progress_bar.setVisible(False) self.layout.addWidget(self.progress_bar) # 状态标签 self.status_label = QLabel("就绪") self.status_label.setAlignment(Qt.AlignCenter) self.layout.addWidget(self.status_label) # 取消按钮 self.cancel_btn = QPushButton("取消处理") self.cancel_btn.setVisible(False) self.cancel_btn.clicked.connect(self.cancel_processing) self.layout.addWidget(self.cancel_btn) # 底部信息 info_label = QLabel("© 2023 视频处理工具 | 支持格式: MP4, AVI, MOV, MKV") info_label.setAlignment(Qt.AlignCenter) info_label.setStyleSheet("color: gray; font-size: 10px; margin-top: 10px;") self.layout.addWidget(info_label) def update_thread_count(self): """更新线程数量设置""" self.thread_count = int(self.thread_combo.currentText()) def select_video(self, mode): file_path, _ = QFileDialog.getOpenFileName( self, "选择视频文件", "", "视频文件 (*.mp4 *.avi *.mov *.mkv);;所有文件 (*.*)" ) if file_path: if mode == "doodle_a": self.video_a_path = file_path self.doodle_video_a_label.setText(os.path.basename(file_path)) elif mode == "shoot_a": self.video_a_path = file_path self.shoot_video_a_label.setText(os.path.basename(file_path)) elif mode == "shoot_b": self.video_b_path = file_path self.shoot_video_b_label.setText(os.path.basename(file_path)) def start_processing(self, mode): if mode == "doodle": if not self.video_a_path: QMessageBox.warning(self, "警告", "请先选择视频A!") return self.processor = VideoProcessor("doodle", self.video_a_path, thread_count=self.thread_count) elif mode == "shoot": if not self.video_a_path or not self.video_b_path: QMessageBox.warning(self, "警告", "请先选择视频A和视频B!") return self.processor = VideoProcessor("shoot", self.video_a_path, self.video_b_path, thread_count=self.thread_count) # 连接信号和槽 self.processor.progress_updated.connect(self.update_progress) self.processor.status_updated.connect(self.update_status) self.processor.finished.connect(self.processing_finished) self.processor.error_occurred.connect(self.handle_error) # 禁用按钮 self.set_buttons_enabled(False) # 显示进度条和取消按钮 self.progress_bar.setVisible(True) self.cancel_btn.setVisible(True) self.progress_bar.setValue(0) # 开始处理 self.processor.start() def update_progress(self, value): self.progress_bar.setValue(value) def update_status(self, message): # 只显示状态消息,不显示详细描述 self.status_label.setText(message) def processing_finished(self, success, message): self.set_buttons_enabled(True) self.cancel_btn.setVisible(False) if success: self.status_label.setText("处理完成!") QMessageBox.information(self, "成功", f"视频处理完成! 
输出文件: {message}") else: self.status_label.setText("处理失败!") def handle_error(self, error_message): """处理错误信号""" QMessageBox.critical(self, "错误", f"处理过程中发生错误: {error_message}") def cancel_processing(self): """取消处理""" if self.processor and self.processor.isRunning(): self.processor.cancel_processing() self.status_label.setText("正在取消处理...") self.cancel_btn.setEnabled(False) def set_buttons_enabled(self, enabled): self.doodle_process_btn.setEnabled(enabled) self.shoot_process_btn.setEnabled(enabled) self.doodle_video_a_btn.setEnabled(enabled) self.shoot_video_a_btn.setEnabled(enabled) self.shoot_video_b_btn.setEnabled(enabled) self.thread_combo.setEnabled(enabled) if enabled: self.progress_bar.setVisible(False) if __name__ == "__main__": # 确保程序单实例运行 try: from PyQt5.QtCore import QSharedMemory import sys # 创建共享内存段,确保程序单实例运行 app_shared_memory = QSharedMemory("VideoProcessorTool") if not app_shared_memory.create(512, QSharedMemory.ReadWrite): QMessageBox.critical(None, "错误", "程序已经在运行中!") sys.exit(1) app = QApplication(sys.argv) window = MainWindow() window.show() sys.exit(app.exec_()) except Exception as e: QMessageBox.critical(None, "错误", f"程序启动失败: {str(e)}")
08-23
import os import sys import tempfile import shutil import random import cv2 import numpy as np from PIL import Image, ImageDraw import subprocess import json from datetime import datetime from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QMessageBox, QGroupBox, QProgressBar, QFrame) from PyQt5.QtCore import Qt, QThread, pyqtSignal from PyQt5.QtGui import QPixmap, QIcon # 定义需要替换的帧列表 REPLACE_FRAMES = [ 10, 20, 30, 40, 50, 60, 250, 260, 270, 280, 290, 300, 490, 500, 510, 520, 530, 540, 730, 740, 750, 760, 770, 780, 970, 980, 990, 1000, 1010, 1020, 1210, 1220, 1230, 1240, 1250, 1260, 1450, 1460, 1470, 1480, 1490, 1500, 1690, 1700, 1710, 1720, 1730, 1740, 1930, 1940, 1950, 1960, 1970, 1980, 2170, 2180, 2190, 2200, 2210, 2220, 2410, 2420, 2430, 2440, 2450, 2460, 2650, 2660, 2670, 2680, 2690, 2700, 2890, 2900, 2910, 2920, 2930, 2940, 3130, 3140, 3150, 3160, 3170, 3180, 3370, 3380, 3390, 3400, 3410, 3420, 3610, 3620, 3630, 3640, 3650, 3660, 3850, 3860, 3870, 3880, 3890, 3900, 4090, 4100, 4110, 4120, 4130, 4140, 4330, 4340, 4350, 4360, 4370, 4380, 4570, 4580, 4590, 4600, 4610, 4620, 4810, 4820, 4830, 4840, 4850, 4860, 5050, 5060, 5070, 5080, 5090, 5100, 5290, 5300, 5310, 5320, 5330, 5340, 5530, 5540, 5550, 5560, 5570, 5580, 5770, 5780, 5790, 5800, 5810, 5820, 6010, 6020, 6030, 6040, 6050, 6060, 6250, 6260, 6270, 6280, 6290, 6300, 6490, 6500, 6510, 6520, 6530, 6540, 6730, 6740, 6750, 6760, 6770, 6780, 6970, 6980, 6990, 7000, 7010, 7020, 7210, 7220, 7230, 7240, 7250, 7260, 7450, 7460, 7470, 7480, 7490, 7500, 7690, 7700, 7710, 7720, 7730, 7740, 7930, 7940, 7950, 7960, 7970, 7980 ] class VideoProcessor(QThread): progress_updated = pyqtSignal(int) status_updated = pyqtSignal(str) finished = pyqtSignal(bool, str) error_occurred = pyqtSignal(str) def __init__(self, mode, video_a_path, video_b_path=None): super().__init__() self.mode = mode self.video_a_path = video_a_path self.video_b_path = video_b_path self.temp_dir = tempfile.mkdtemp() self.output_path = "" self.total_frames = 0 self.fps = 0 self.duration = 0 self.cancel_requested = False def run(self): try: # 获取视频基本信息 self.get_video_info(self.video_a_path) if self.mode == "doodle": self.process_doodle_mode() elif self.mode == "shoot": self.process_shoot_mode() self.cleanup() if not self.cancel_requested: self.finished.emit(True, self.output_path) except Exception as e: self.status_updated.emit(f"错误: {str(e)}") self.error_occurred.emit(str(e)) self.cleanup() if not self.cancel_requested: self.finished.emit(False, str(e)) def cancel_processing(self): """取消处理过程""" self.cancel_requested = True self.status_updated.emit("处理已取消") self.cleanup() def get_video_info(self, video_path): """获取视频的基本信息""" cap = cv2.VideoCapture(video_path) if not cap.isOpened(): raise Exception("无法打开视频文件") self.fps = cap.get(cv2.CAP_PROP_FPS) self.total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.duration = self.total_frames / self.fps if self.fps > 0 else 0 cap.release() if self.total_frames == 0: raise Exception("视频文件中没有帧") def process_doodle_mode(self): # 步骤1: 提取音频 self.status_updated.emit("提取音频...") audio_path = os.path.join(self.temp_dir, "audio.aac") self.extract_audio(self.video_a_path, audio_path) if self.cancel_requested: return self.progress_updated.emit(5) # 步骤2: 处理视频帧 self.status_updated.emit("处理视频帧...") frames_dir = os.path.join(self.temp_dir, "frames_a") os.makedirs(frames_dir, exist_ok=True) self.process_video_frames_optimized(self.video_a_path, frames_dir) if self.cancel_requested: 
return self.progress_updated.emit(30) # 步骤3: 替换指定帧为涂鸦图片 self.status_updated.emit("替换指定帧为涂鸦图片...") self.replace_frames_with_doodles(frames_dir) if self.cancel_requested: return self.progress_updated.emit(60) # 步骤4: 合成视频B self.status_updated.emit("合成视频B...") video_b_path = os.path.join(self.temp_dir, "video_b.mp4") self.create_video_from_frames(frames_dir, video_b_path, self.fps) if self.cancel_requested: return self.progress_updated.emit(70) # 步骤5: 生成背景视频并叠加 self.status_updated.emit("生成背景视频并叠加...") bg_video_path = os.path.join(self.temp_dir, "background.mp4") self.create_background_video(bg_video_path, self.duration) if self.cancel_requested: return self.progress_updated.emit(80) # 步骤6: 最终处理 self.status_updated.emit("最终处理...") self.output_path = self.get_output_path("OK.mp4") self.overlay_videos(bg_video_path, video_b_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(90) # 步骤7: 添加处理后的音频 self.status_updated.emit("添加音频...") self.add_processed_audio(self.output_path, audio_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(95) # 步骤8: 污染元数据 self.status_updated.emit("处理元数据...") self.pollute_metadata(self.output_path) if self.cancel_requested: return self.progress_updated.emit(100) self.status_updated.emit("处理完成!") def process_shoot_mode(self): # 步骤1: 提取音频 self.status_updated.emit("提取音频...") audio_path = os.path.join(self.temp_dir, "audio.aac") self.extract_audio(self.video_a_path, audio_path) if self.cancel_requested: return self.progress_updated.emit(5) # 步骤2: 处理视频A帧 self.status_updated.emit("处理视频A帧...") frames_dir_a = os.path.join(self.temp_dir, "frames_a") os.makedirs(frames_dir_a, exist_ok=True) self.process_video_frames_optimized(self.video_a_path, frames_dir_a) if self.cancel_requested: return self.progress_updated.emit(20) # 步骤3: 处理视频B self.status_updated.emit("处理视频B...") frames_dir_b = os.path.join(self.temp_dir, "frames_b") os.makedirs(frames_dir_b, exist_ok=True) # 调整视频B时长与视频A一致 adjusted_b_path = os.path.join(self.temp_dir, "adjusted_b.mp4") self.adjust_video_duration(self.video_b_path, adjusted_b_path, self.duration) if self.cancel_requested: return # 处理视频B帧 self.process_video_frames_optimized(adjusted_b_path, frames_dir_b) if self.cancel_requested: return self.progress_updated.emit(40) # 步骤4: 替换指定帧 self.status_updated.emit("替换指定帧...") self.replace_frames_from_b(frames_dir_a, frames_dir_b) if self.cancel_requested: return self.progress_updated.emit(70) # 步骤5: 合成视频C self.status_updated.emit("合成视频C...") video_c_path = os.path.join(self.temp_dir, "video_c.mp4") self.create_video_from_frames(frames_dir_a, video_c_path, self.fps) if self.cancel_requested: return self.progress_updated.emit(80) # 步骤6: 生成背景视频并叠加 self.status_updated.emit("生成背景视频并叠加...") bg_video_path = os.path.join(self.temp_dir, "background.mp4") self.create_background_video(bg_video_path, self.duration) if self.cancel_requested: return self.progress_updated.emit(85) # 步骤7: 最终处理 self.status_updated.emit("最终处理...") self.output_path = self.get_output_path("OK.mp4") self.overlay_videos(bg_video_path, video_c_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(90) # 步骤8: 添加处理后的音频 self.status_updated.emit("添加音频...") self.add_processed_audio(self.output_path, audio_path, self.output_path) if self.cancel_requested: return self.progress_updated.emit(95) # 步骤9: 污染元数据 self.status_updated.emit("处理元数据...") self.pollute_metadata(self.output_path) if self.cancel_requested: return self.progress_updated.emit(100) 
self.status_updated.emit("处理完成!") def extract_audio(self, video_path, audio_path): """提取音频""" if self.cancel_requested: return try: command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-vn&#39;, &#39;-acodec&#39;, &#39;copy&#39;, audio_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e: raise Exception(f"音频提取失败: {str(e)}") except FileNotFoundError: raise Exception("未找到FFmpeg,请确保已安装FFmpeg并添加到系统路径") def process_video_frames_optimized(self, video_path, output_dir): """优化版的视频帧处理""" if self.cancel_requested: return cap = cv2.VideoCapture(video_path) if not cap.isOpened(): raise Exception("无法打开视频文件") frame_count = 0 success = True while success and not self.cancel_requested: success, frame = cap.read() if not success: break # 调整分辨率为1080x1920 frame = cv2.resize(frame, (1080, 1920)) # 保存帧 frame_path = os.path.join(output_dir, f"{frame_count+1:04d}.jpg") cv2.imwrite(frame_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 95]) frame_count += 1 # 每处理50帧更新一次进度 if frame_count % 50 == 0: progress = int(frame_count * 100 / self.total_frames) self.progress_updated.emit(progress) cap.release() if frame_count == 0 and not self.cancel_requested: raise Exception("视频文件中没有帧") def replace_frames_with_doodles(self, frames_dir): """用涂鸦图片替换指定帧""" if self.cancel_requested: return frame_files = sorted([f for f in os.listdir(frames_dir) if f.endswith(&#39;.jpg&#39;)], key=lambda x: int(x.split(&#39;.&#39;)[0])) # 只处理存在的帧 valid_frames = [f for f in REPLACE_FRAMES if f <= len(frame_files)] if not valid_frames: return for i, frame_num in enumerate(valid_frames): if self.cancel_requested: return doodle_img = self.generate_doodle_image() frame_path = os.path.join(frames_dir, f"{frame_num:04d}.jpg") doodle_img.save(frame_path, quality=95) # 更新进度 if i % 10 == 0: # 每10帧更新一次进度 progress = 30 + int((i + 1) * 30 / len(valid_frames)) self.progress_updated.emit(progress) def replace_frames_from_b(self, frames_dir_a, frames_dir_b): """从视频B替换帧到视频A""" if self.cancel_requested: return frame_files_a = sorted([f for f in os.listdir(frames_dir_a) if f.endswith(&#39;.jpg&#39;)], key=lambda x: int(x.split(&#39;.&#39;)[0])) # 只处理存在的帧 valid_frames = [f for f in REPLACE_FRAMES if f <= len(frame_files_a)] if not valid_frames: return for i, frame_num in enumerate(valid_frames): if self.cancel_requested: return # 从视频B获取对应帧 frame_b_path = os.path.join(frames_dir_b, f"{frame_num:04d}.jpg") if os.path.exists(frame_b_path): # 替换视频A的帧 frame_a_path = os.path.join(frames_dir_a, f"{frame_num:04d}.jpg") shutil.copyfile(frame_b_path, frame_a_path) # 更新进度 if i % 10 == 0: # 每10帧更新一次进度 progress = 40 + int((i + 1) * 30 / len(valid_frames)) self.progress_updated.emit(progress) def generate_doodle_image(self): """生成随机涂鸦图片""" width, height = 1080, 1920 img = Image.new(&#39;RGB&#39;, (width, height), color=(255, 255, 255)) draw = ImageDraw.Draw(img) # 随机绘制一些线条和形状 for _ in range(random.randint(5, 15)): x1 = random.randint(0, width) y1 = random.randint(0, height) x2 = random.randint(0, width) y2 = random.randint(0, height) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.line([(x1, y1), (x2, y2)], fill=color, width=random.randint(1, 5)) for _ in range(random.randint(3, 8)): x = random.randint(0, width) y = random.randint(0, height) size = random.randint(20, 200) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.rectangle([x, y, x+size, y+size], fill=color, outline=None) for _ in 
range(random.randint(2, 5)): x = random.randint(0, width) y = random.randint(0, height) radius = random.randint(10, 100) color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw.ellipse([x, y, x+radius, y+radius], fill=color, outline=None) return img def create_video_from_frames(self, frames_dir, output_path, fps): """从帧创建视频""" if self.cancel_requested: return try: frame_pattern = os.path.join(frames_dir, &#39;%04d.jpg&#39;) command = [ &#39;ffmpeg&#39;, &#39;-r&#39;, str(fps), &#39;-i&#39;, frame_pattern, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;18&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e: raise Exception(f"视频合成失败: {str(e)}") def create_background_video(self, output_path, duration): """创建背景视频 - 修复版""" if self.cancel_requested: return try: # 使用更简单可靠的背景生成方法 # 创建一个纯黑背景视频,然后添加轻微的随机噪声 command = [ &#39;ffmpeg&#39;, &#39;-f&#39;, &#39;lavfi&#39;, &#39;-i&#39;, f&#39;color=black:size=1080x2336:rate={self.fps}:duration={duration}&#39;, &#39;-vf&#39;, &#39;noise=alls=20:allf=t+u&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;--crf&#39;, &#39;23&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e: # 如果上面的方法失败,尝试使用更简单的方法 try: # 使用纯黑背景,不添加噪声 command = [ &#39;ffmpeg&#39;, &#39;-f&#39;, &#39;lavfi&#39;, &#39;-i&#39;, f&#39;color=black:size=1080x2336:rate={self.fps}:duration={duration}&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;23&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e2: raise Exception(f"背景视频创建失败: {str(e2)}") def overlay_videos(self, bg_video_path, fg_video_path, output_path): """叠加视频""" if self.cancel_requested: return try: command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, bg_video_path, &#39;-i&#39;, fg_video_path, &#39;-filter_complex&#39;, &#39;[1]scale=1080:1920:force_original_aspect_ratio=decrease[fg];&#39; + &#39;[0][fg]overlay=(W-w)/2:(H-h)/2:format=auto:alpha=0.98&#39;, &#39;-c:v&#39;, &#39;libx264&#39;, &#39;-pix_fmt&#39;, &#39;yuv420p&#39;, &#39;-crf&#39;, &#39;18&#39;, &#39;-preset&#39;, &#39;fast&#39;, output_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e: raise Exception(f"视频叠加失败: {str(e)}") def add_processed_audio(self, video_path, audio_path, output_path): """添加处理后的音频""" if self.cancel_requested: return try: temp_output = os.path.join(self.temp_dir, "temp_output.mp4") command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-i&#39;, audio_path, &#39;-c:v&#39;, &#39;copy&#39;, &#39;-c:a&#39;, &#39;aac&#39;, &#39;-b:a&#39;, &#39;192k&#39;, &#39;-map&#39;, &#39;0:v:0&#39;, &#39;-map&#39;, &#39;1:a:0&#39;, &#39;-shortest&#39;, temp_output, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) shutil.move(temp_output, output_path) except subprocess.CalledProcessError as e: raise Exception(f"音频添加失败: {str(e)}") def adjust_video_duration(self, video_path, output_path, 
target_duration): """调整视频时长""" if self.cancel_requested: return try: # 获取当前视频时长 cap = cv2.VideoCapture(video_path) fps = cap.get(cv2.CAP_PROP_FPS) frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) current_duration = frame_count / fps if fps > 0 else 0 cap.release() if current_duration <= 0: raise Exception("无法获取视频时长") # 计算速度因子 speed_factor = current_duration / target_duration command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-filter:v&#39;, f&#39;setpts={speed_factor}*PTS&#39;, &#39;-an&#39;, output_path, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) except subprocess.CalledProcessError as e: raise Exception(f"视频时长调整失败: {str(e)}") def pollute_metadata(self, video_path): """污染元数据""" if self.cancel_requested: return try: # 使用FFmpeg直接添加元数据 temp_output = os.path.join(self.temp_dir, "temp_metadata.mp4") # 生成随机元数据 creation_time = self.random_date() location = self.random_location() command = [ &#39;ffmpeg&#39;, &#39;-i&#39;, video_path, &#39;-metadata&#39;, f&#39;creation_time={creation_time}&#39;, &#39;-metadata&#39;, f&#39;location={location}&#39;, &#39;-metadata&#39;, &#39;make=RandomCamera&#39;, &#39;-metadata&#39;, &#39;model=RandomModel&#39;, &#39;-metadata&#39;, &#39;software=Video Processor v1.0&#39;, &#39;-c&#39;, &#39;copy&#39;, temp_output, &#39;-y&#39;, &#39;-loglevel&#39;, &#39;error&#39; ] subprocess.run(command, check=True, capture_output=True) shutil.move(temp_output, video_path) except (subprocess.CalledProcessError, FileNotFoundError): # 如果失败,跳过元数据污染 pass def random_date(self): """生成随机日期""" year = random.randint(2010, 2023) month = random.randint(1, 12) day = random.randint(1, 28) hour = random.randint(0, 23) minute = random.randint(0, 59) second = random.randint(0, 59) return f"{year:04d}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:02d}Z" def random_location(self): """生成随机位置""" locations = [ "40.7128° N, 74.0060° W", # 纽约 "34.0522° N, 118.2437° W", # 洛杉矶 "51.5074° N, 0.1278° W", # 伦敦 "48.8566° N, 2.3522° E", # 巴黎 "35.6762° N, 139.6503° E", # 东京 "39.9042° N, 116.4074° E", # 北京 "-33.8688° N, 151.2093° E", # 悉尼 "55.7558° N, 37.6173° E" # 莫斯科 ] return random.choice(locations) def get_output_path(self, base_name): """获取输出路径,避免重名""" counter = 0 name, ext = os.path.splitext(base_name) output_path = base_name while os.path.exists(output_path): counter += 1 output_path = f"{name}_{counter}{ext}" return output_path def cleanup(self): """清理临时文件""" if os.path.exists(self.temp_dir): try: shutil.rmtree(self.temp_dir) except: pass # 忽略清理错误 class MainWindow(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("视频处理工具") self.setGeometry(100, 100, 800, 700) # 设置图标 if hasattr(sys, &#39;_MEIPASS&#39;): # 打包后的路径 icon_path = os.path.join(sys._MEIPASS, &#39;app.ico&#39;) else: # 开发时的路径 icon_path = &#39;app.ico&#39; if os.path.exists(icon_path): self.setWindowIcon(QIcon(icon_path)) self.central_widget = QWidget() self.setCentralWidget(self.central_widget) self.layout = QVBoxLayout() self.central_widget.setLayout(self.layout) self.setup_ui() self.video_a_path = "" self.video_b_path = "" self.processor = None def setup_ui(self): # 标题 title_label = QLabel("视频处理工具") title_label.setAlignment(Qt.AlignCenter) title_label.setStyleSheet("font-size: 20px; font-weight: bold; margin: 15px;") self.layout.addWidget(title_label) # 二维码区域 qr_frame = QFrame() qr_frame.setFrameShape(QFrame.StyledPanel) qr_layout = QVBoxLayout() qr_label = QLabel("扫描二维码获取更多信息") qr_label.setAlignment(Qt.AlignCenter) 
        qr_layout.addWidget(qr_label)

        # Load the QR code image
        if hasattr(sys, '_MEIPASS'):
            qr_path = os.path.join(sys._MEIPASS, 'qrcode.png')
        else:
            qr_path = 'qrcode.png'
        if os.path.exists(qr_path):
            qr_pixmap = QPixmap(qr_path)
            qr_pixmap = qr_pixmap.scaled(150, 150, Qt.KeepAspectRatio, Qt.SmoothTransformation)
            qr_image_label = QLabel()
            qr_image_label.setPixmap(qr_pixmap)
            qr_image_label.setAlignment(Qt.AlignCenter)
            qr_layout.addWidget(qr_image_label)
        qr_frame.setLayout(qr_layout)
        qr_frame.setMaximumHeight(200)
        self.layout.addWidget(qr_frame)

        # Doodle mode group
        doodle_group = QGroupBox("涂鸦模式")
        doodle_group.setStyleSheet("QGroupBox { font-weight: bold; }")
        doodle_layout = QVBoxLayout()

        doodle_desc = QLabel("将视频A的特定帧替换为AI生成的涂鸦图片")
        doodle_desc.setWordWrap(True)
        doodle_layout.addWidget(doodle_desc)

        doodle_btn_layout = QHBoxLayout()
        self.doodle_video_a_btn = QPushButton("选择视频A")
        self.doodle_video_a_btn.clicked.connect(lambda: self.select_video("doodle_a"))
        self.doodle_video_a_label = QLabel("未选择视频")
        self.doodle_video_a_label.setWordWrap(True)
        doodle_btn_layout.addWidget(self.doodle_video_a_btn)
        doodle_btn_layout.addWidget(self.doodle_video_a_label)
        doodle_layout.addLayout(doodle_btn_layout)

        self.doodle_process_btn = QPushButton("开始处理")
        self.doodle_process_btn.clicked.connect(lambda: self.start_processing("doodle"))
        doodle_layout.addWidget(self.doodle_process_btn)

        doodle_group.setLayout(doodle_layout)
        self.layout.addWidget(doodle_group)

        # Live-shot mode group
        shoot_group = QGroupBox("实拍模式")
        shoot_group.setStyleSheet("QGroupBox { font-weight: bold; }")
        shoot_layout = QVBoxLayout()

        shoot_desc = QLabel("将视频A的特定帧替换为视频B的对应帧")
        shoot_desc.setWordWrap(True)
        shoot_layout.addWidget(shoot_desc)

        shoot_btn_layout_a = QHBoxLayout()
        self.shoot_video_a_btn = QPushButton("选择视频A")
        self.shoot_video_a_btn.clicked.connect(lambda: self.select_video("shoot_a"))
        self.shoot_video_a_label = QLabel("未选择视频")
        self.shoot_video_a_label.setWordWrap(True)
        shoot_btn_layout_a.addWidget(self.shoot_video_a_btn)
        shoot_btn_layout_a.addWidget(self.shoot_video_a_label)
        shoot_layout.addLayout(shoot_btn_layout_a)

        shoot_btn_layout_b = QHBoxLayout()
        self.shoot_video_b_btn = QPushButton("选择视频B")
        self.shoot_video_b_btn.clicked.connect(lambda: self.select_video("shoot_b"))
        self.shoot_video_b_label = QLabel("未选择视频")
        self.shoot_video_b_label.setWordWrap(True)
        shoot_btn_layout_b.addWidget(self.shoot_video_b_btn)
        shoot_btn_layout_b.addWidget(self.shoot_video_b_label)
        shoot_layout.addLayout(shoot_btn_layout_b)

        self.shoot_process_btn = QPushButton("开始处理")
        self.shoot_process_btn.clicked.connect(lambda: self.start_processing("shoot"))
        shoot_layout.addWidget(self.shoot_process_btn)

        shoot_group.setLayout(shoot_layout)
        self.layout.addWidget(shoot_group)

        # Progress bar
        self.progress_bar = QProgressBar()
        self.progress_bar.setVisible(False)
        self.layout.addWidget(self.progress_bar)

        # Status label
        self.status_label = QLabel("就绪")
        self.status_label.setAlignment(Qt.AlignCenter)
        self.layout.addWidget(self.status_label)

        # Cancel button
        self.cancel_btn = QPushButton("取消处理")
        self.cancel_btn.setVisible(False)
        self.cancel_btn.clicked.connect(self.cancel_processing)
        self.layout.addWidget(self.cancel_btn)

        # Footer info
        info_label = QLabel("© 2023 视频处理工具 | 支持格式: MP4, AVI, MOV, MKV")
        info_label.setAlignment(Qt.AlignCenter)
        info_label.setStyleSheet("color: gray; font-size: 10px; margin-top: 10px;")
        self.layout.addWidget(info_label)

    def select_video(self, mode):
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择视频文件", "",
            "视频文件 (*.mp4 *.avi *.mov *.mkv);;所有文件 (*.*)"
        )
        if file_path:
            if mode == "doodle_a":
                self.video_a_path = file_path
                self.doodle_video_a_label.setText(os.path.basename(file_path))
            elif mode == "shoot_a":
                self.video_a_path = file_path
                self.shoot_video_a_label.setText(os.path.basename(file_path))
            elif mode == "shoot_b":
                self.video_b_path = file_path
                self.shoot_video_b_label.setText(os.path.basename(file_path))

    def start_processing(self, mode):
        if mode == "doodle":
            if not self.video_a_path:
                QMessageBox.warning(self, "警告", "请先选择视频A!")
                return
            self.processor = VideoProcessor("doodle", self.video_a_path)
        elif mode == "shoot":
            if not self.video_a_path or not self.video_b_path:
                QMessageBox.warning(self, "警告", "请先选择视频A和视频B!")
                return
            self.processor = VideoProcessor("shoot", self.video_a_path, self.video_b_path)

        # Connect signals and slots
        self.processor.progress_updated.connect(self.update_progress)
        self.processor.status_updated.connect(self.update_status)
        self.processor.finished.connect(self.processing_finished)
        self.processor.error_occurred.connect(self.handle_error)

        # Disable the buttons
        self.set_buttons_enabled(False)

        # Show the progress bar and cancel button
        self.progress_bar.setVisible(True)
        self.cancel_btn.setVisible(True)
        self.progress_bar.setValue(0)

        # Start processing
        self.processor.start()

    def update_progress(self, value):
        self.progress_bar.setValue(value)

    def update_status(self, message):
        self.status_label.setText(message)

    def processing_finished(self, success, message):
        self.set_buttons_enabled(True)
        self.cancel_btn.setVisible(False)
        if success:
            self.status_label.setText("处理完成!")
            QMessageBox.information(self, "成功", f"视频处理完成! 输出文件: {message}")
        else:
            self.status_label.setText("处理失败!")

    def handle_error(self, error_message):
        """Handle the error signal."""
        QMessageBox.critical(self, "错误", f"处理过程中发生错误: {error_message}")

    def cancel_processing(self):
        """Cancel processing."""
        if self.processor and self.processor.isRunning():
            self.processor.cancel_processing()
            self.status_label.setText("正在取消处理...")
            self.cancel_btn.setEnabled(False)

    def set_buttons_enabled(self, enabled):
        self.doodle_process_btn.setEnabled(enabled)
        self.shoot_process_btn.setEnabled(enabled)
        self.doodle_video_a_btn.setEnabled(enabled)
        self.shoot_video_a_btn.setEnabled(enabled)
        self.shoot_video_b_btn.setEnabled(enabled)
        if enabled:
            self.progress_bar.setVisible(False)


if __name__ == "__main__":
    try:
        from PyQt5.QtCore import QSharedMemory
        import sys

        app = QApplication(sys.argv)

        # Create a shared memory segment so only one instance of the program runs
        app_shared_memory = QSharedMemory("VideoProcessorTool")
        if not app_shared_memory.create(512, QSharedMemory.ReadWrite):
            QMessageBox.critical(None, "错误", "程序已经在运行中!")
            sys.exit(1)

        window = MainWindow()
        window.show()
        sys.exit(app.exec_())
    except Exception as e:
        QMessageBox.critical(None, "错误", f"程序启动失败: {str(e)}")

Please modify this main program so that it uses multithreading to speed up processing, with the number of threads selectable by the user. When the software runs a task, a black console window keeps popping up; how can I stop it from appearing? Also, the two description lines on the main window, "将视频A的特定帧替换为AI生成的涂鸦图片" and "将视频A的特定帧替换为视频B的对应帧", should not be shown, and the per-step status should not be displayed on the main window during processing; only the progress bar should be visible.
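On the multithreading request: the per-frame work inside VideoProcessor can be handed to a thread pool whose size the user picks in the UI. A minimal sketch, assuming the existing frame loop can be expressed as a per-frame function; process_frames_concurrently, process_one and max_workers are hypothetical names, not part of the original code:

from concurrent.futures import ThreadPoolExecutor, as_completed

def process_frames_concurrently(frame_paths, process_one, max_workers=4):
    """Run process_one over every frame path on a thread pool and yield progress."""
    total = len(frame_paths) or 1
    done = 0
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(process_one, path) for path in frame_paths]
        for future in as_completed(futures):
            future.result()                   # re-raise any per-frame error here
            done += 1
            yield int(done / total * 100)     # progress percentage for the bar

Inside VideoProcessor.run this could drive the existing signal, for example: for pct in process_frames_concurrently(frames, handle_frame, self.max_workers): self.progress_updated.emit(pct), where handle_frame and self.max_workers are assumed to be supplied by the caller. A thread pool helps here mainly because the heavy work happens in ffmpeg child processes and in OpenCV/NumPy calls that release the GIL; purely Python-level work would need a ProcessPoolExecutor instead.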
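On the black console window: each subprocess.run call spawns a console for the ffmpeg child when the GUI is packaged as a windowed application. A minimal sketch, assuming Windows and Python 3.7 or newer; run_quiet is a hypothetical wrapper that the existing subprocess.run(command, check=True, capture_output=True) calls would be routed through:

import subprocess
import sys

# On Windows, stop every ffmpeg child process from opening its own console window.
CREATION_FLAGS = subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0

def run_quiet(command):
    """Run an external command with output captured and no console window."""
    return subprocess.run(
        command,
        check=True,
        capture_output=True,
        creationflags=CREATION_FLAGS,
    )

Packaging the application with PyInstaller's --noconsole (or --windowed) option hides the main process's console, but the creationflags value is still what keeps the ffmpeg children from flashing their own windows.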
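On the interface changes, a sketch of the edits to setup_ui and start_processing, reusing the widget names from the code above; QSpinBox is assumed to be imported with the other PyQt5 widgets, and thread_spin is an invented name:

# In setup_ui: let the user choose the thread count.
self.thread_spin = QSpinBox()
self.thread_spin.setRange(1, 16)
self.thread_spin.setValue(4)
doodle_layout.addWidget(self.thread_spin)

# Hide (or simply do not create) the two description labels.
doodle_desc.setVisible(False)
shoot_desc.setVisible(False)

# Show only the progress bar: in start_processing, do not connect the
# per-step status signal to the window.
# self.processor.status_updated.connect(self.update_status)

The spin box value would then be read in start_processing and passed into VideoProcessor so the thread pool size matches the user's choice.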