import os
import sys
import random
import shutil
import subprocess
import numpy as np
from scipy.fftpack import dct, idct
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout,
QHBoxLayout, QPushButton, QLabel, QLineEdit,
QFileDialog, QMessageBox, QProgressBar, QTextEdit)
from PyQt5.QtCore import Qt, QThread, pyqtSignal
import cv2
from moviepy.editor import VideoFileClip, ColorClip, CompositeVideoClip
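# Overview: VideoProcessor runs the whole pipeline in a worker thread:
# extract the audio of video A, apply (placeholder) audio steganography,
# render A and each selected B video onto a 720x1560 canvas, hide the B
# frames inside the A frames via block DCT, then mux the audio and metadata
# back in with FFmpeg. MainWindow is a thin PyQt5 front end around it.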
class VideoProcessor(QThread):
progress_signal = pyqtSignal(int)
log_signal = pyqtSignal(str)
finished_signal = pyqtSignal(bool)
def __init__(self, a_video_path, output_dir, num_videos):
super().__init__()
self.a_video_path = a_video_path
self.output_dir = output_dir
self.num_videos = num_videos
self.video_dir = "video"
self.daifa_dir = "daifa"
def run(self):
try:
            # Create the output directory
            if not os.path.exists(self.daifa_dir):
                os.makedirs(self.daifa_dir)
            # Collect the list of B videos
            b_videos = [f for f in os.listdir(self.video_dir)
                        if f.lower().endswith(('.mp4', '.avi', '.mov', '.mkv'))]
            if len(b_videos) < self.num_videos:
                self.log_signal.emit(f"Error: the video folder contains only {len(b_videos)} video(s), but {self.num_videos} are required")
                self.finished_signal.emit(False)
                return
            # Randomly pick the B videos to use
            selected_b_videos = random.sample(b_videos, self.num_videos)
            # Inspect video A
            a_video_info = self.get_video_info(self.a_video_path)
            self.log_signal.emit(f"Video A info: duration={a_video_info['duration']}s, resolution={a_video_info['width']}x{a_video_info['height']}")
            # Extract the audio track of video A
            audio_path = self.extract_audio(self.a_video_path)
            self.log_signal.emit("Extracted audio from video A")
            # Apply audio steganography
            stego_audio_path = self.audio_steganography(audio_path)
            self.log_signal.emit("Finished audio steganography")
            for i, b_video_name in enumerate(selected_b_videos):
                self.log_signal.emit(f"Processing video {i+1}: {b_video_name}")
                # Render video A onto the target canvas and split it into frames
                a_frames_dir = self.process_a_video(self.a_video_path, a_video_info)
                self.progress_signal.emit(25)
                # Do the same for video B
                b_video_path = os.path.join(self.video_dir, b_video_name)
                b_frames_dir = self.process_b_video(b_video_path, a_video_info)
                self.progress_signal.emit(50)
                # Embed the B frames into the A frames
                stego_frames_dir = self.embed_frames(a_frames_dir, b_frames_dir, a_video_info)
                self.progress_signal.emit(75)
                # Assemble the final video
                output_path = os.path.join(self.daifa_dir, f"daifa{i+1}.mp4")
                self.create_final_video(stego_frames_dir, stego_audio_path, output_path, a_video_info)
                # Add metadata
                self.add_metadata(output_path)
                # Clean up the temporary frame directories
                self.cleanup([a_frames_dir, b_frames_dir, stego_frames_dir])
                self.progress_signal.emit(100)
                self.log_signal.emit(f"Finished video {i+1}: {output_path}")
                # Remove the B video that has been used
                os.remove(b_video_path)
            # Clean up the temporary audio files
            self.cleanup([audio_path, stego_audio_path])
            self.finished_signal.emit(True)
        except Exception as e:
            self.log_signal.emit(f"Error during processing: {str(e)}")
self.finished_signal.emit(False)
def get_video_info(self, video_path):
clip = VideoFileClip(video_path)
info = {
'duration': clip.duration,
'fps': clip.fps,
'width': clip.w,
'height': clip.h
}
clip.close()
return info
def extract_audio(self, video_path):
clip = VideoFileClip(video_path)
audio_path = "temp_audio.wav"
clip.audio.write_audiofile(audio_path, verbose=False, logger=None)
clip.close()
return audio_path
def audio_steganography(self, audio_path):
        # Audio steganography would be implemented here.
        # Simplified placeholder: just copy the file unchanged.
stego_audio_path = "temp_audio_stego.wav"
shutil.copyfile(audio_path, stego_audio_path)
return stego_audio_path
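    # --- Illustrative sketch only, not called by the pipeline ---
    # audio_steganography() above is a placeholder that merely copies the file.
    # A minimal LSB embedding, assuming the extracted WAV is 16-bit PCM, could
    # look like this hypothetical helper: each payload bit replaces the least
    # significant bit of one audio sample.
    def _lsb_audio_embed_sketch(self, audio_path, payload, out_path="temp_audio_lsb.wav"):
        import wave  # local import; only needed by this sketch
        with wave.open(audio_path, 'rb') as src:
            params = src.getparams()
            if params.sampwidth != 2:
                raise ValueError("sketch assumes 16-bit PCM samples")
            samples = np.frombuffer(src.readframes(params.nframes), dtype=np.int16).copy()
        # Spread the payload bits over the first len(bits) samples
        bits = np.unpackbits(np.frombuffer(payload, dtype=np.uint8)).astype(np.int16)
        if bits.size > samples.size:
            raise ValueError("payload too large for this audio file")
        samples[:bits.size] = (samples[:bits.size] & ~1) | bits
        with wave.open(out_path, 'wb') as dst:
            dst.setparams(params)
            dst.writeframes(samples.tobytes())
        return out_path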
def process_a_video(self, video_path, video_info):
frames_dir = "temp_a_frames"
if os.path.exists(frames_dir):
shutil.rmtree(frames_dir)
os.makedirs(frames_dir)
        # Black 720x1560 background (canvas) clip
        bg_clip = ColorClip((720, 1560), color=[0, 0, 0], duration=video_info['duration'])
        bg_clip = bg_clip.set_fps(30)
        # Load video A and compute the scale that fits it inside the canvas
        a_clip = VideoFileClip(video_path)
        scale = min(720 / a_clip.w, 1560 / a_clip.h)
        new_w = int(a_clip.w * scale)
        new_h = int(a_clip.h * scale)
        # Resize and make it slightly transparent
        a_clip = a_clip.resize((new_w, new_h))
        a_clip = a_clip.set_opacity(0.98)
        # Composite the resized clip, centered, over the background
        x_pos = (720 - new_w) // 2
        y_pos = (1560 - new_h) // 2
        final_clip = CompositeVideoClip(
            [bg_clip, a_clip.set_position((x_pos, y_pos))],
            size=(720, 1560)
        ).set_duration(a_clip.duration)
        # Write to a temporary file
temp_video = "temp_a_processed.mp4"
final_clip.write_videofile(
temp_video,
codec='libx264',
audio=False,
fps=30,
verbose=False,
logger=None
)
        # Split the rendered video into PNG frames
cap = cv2.VideoCapture(temp_video)
frame_count = 0
while True:
ret, frame = cap.read()
if not ret:
break
cv2.imwrite(os.path.join(frames_dir, f"frame_{frame_count:06d}.png"), frame)
frame_count += 1
cap.release()
        # Remove the intermediate video
os.remove(temp_video)
a_clip.close()
bg_clip.close()
final_clip.close()
return frames_dir
def process_b_video(self, video_path, a_video_info):
frames_dir = "temp_b_frames"
if os.path.exists(frames_dir):
shutil.rmtree(frames_dir)
os.makedirs(frames_dir)
        # Black 720x1560 background (canvas) clip
        bg_clip = ColorClip((720, 1560), color=[0, 0, 0], duration=a_video_info['duration'])
        bg_clip = bg_clip.set_fps(30)
        # Load video B
        b_clip = VideoFileClip(video_path)
        # Trim it to the duration of video A
        if b_clip.duration > a_video_info['duration']:
            b_clip = b_clip.subclip(0, a_video_info['duration'])
        # Compute the scale that fits it inside the canvas and resize
        scale = min(720 / b_clip.w, 1560 / b_clip.h)
        new_w = int(b_clip.w * scale)
        new_h = int(b_clip.h * scale)
        b_clip = b_clip.resize((new_w, new_h))
        # Composite the resized clip, centered, over the background
        x_pos = (720 - new_w) // 2
        y_pos = (1560 - new_h) // 2
        final_clip = CompositeVideoClip(
            [bg_clip, b_clip.set_position((x_pos, y_pos))],
            size=(720, 1560)
        ).set_duration(b_clip.duration)
        # Write to a temporary file
temp_video = "temp_b_processed.mp4"
final_clip.write_videofile(
temp_video,
codec='libx264',
audio=False,
fps=30,
verbose=False,
logger=None
)
        # Split the rendered video into PNG frames
cap = cv2.VideoCapture(temp_video)
frame_count = 0
while True:
ret, frame = cap.read()
if not ret:
break
cv2.imwrite(os.path.join(frames_dir, f"frame_{frame_count:06d}.png"), frame)
frame_count += 1
cap.release()
        # Remove the intermediate video
os.remove(temp_video)
b_clip.close()
bg_clip.close()
final_clip.close()
return frames_dir
def dct_embed(self, cover_frame, secret_frame, alpha=0.01):
        # Convert both frames to YUV colour space
        cover_yuv = cv2.cvtColor(cover_frame, cv2.COLOR_BGR2YUV)
        secret_yuv = cv2.cvtColor(secret_frame, cv2.COLOR_BGR2YUV)
        # Only the luma (Y) channel carries the hidden data
        cover_y = cover_yuv[:, :, 0].astype(np.float32)
        secret_y = secret_yuv[:, :, 0].astype(np.float32)
        # Downscale the secret image so one pixel maps to one 8x8 cover block
        secret_y = cv2.resize(secret_y, (cover_y.shape[1] // 8, cover_y.shape[0] // 8))
        # Block-wise 8x8 DCT over the cover image
        stego_y = cover_y.copy()
        for i in range(0, cover_y.shape[0], 8):
            for j in range(0, cover_y.shape[1], 8):
                if i // 8 < secret_y.shape[0] and j // 8 < secret_y.shape[1]:
                    block = cover_y[i:i+8, j:j+8]
                    dct_block = dct(dct(block.T, norm='ortho').T, norm='ortho')
                    # Embed into a mid-frequency coefficient
                    dct_block[4, 4] += alpha * secret_y[i // 8, j // 8]
                    idct_block = idct(idct(dct_block.T, norm='ortho').T, norm='ortho')
                    stego_y[i:i+8, j:j+8] = idct_block
        # Merge the modified luma back, clipping to the valid 8-bit range
        stego_yuv = cover_yuv.copy()
        stego_yuv[:, :, 0] = np.clip(stego_y, 0, 255).astype(np.uint8)
        # Convert back to BGR
        stego_frame = cv2.cvtColor(stego_yuv, cv2.COLOR_YUV2BGR)
return stego_frame
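    # --- Illustrative sketch only, not called by the pipeline ---
    # dct_embed() adds alpha * secret to the (4, 4) DCT coefficient of each
    # 8x8 luma block, so recovery is non-blind: it needs the original cover
    # frame. A minimal counterpart could look like this; note that with
    # alpha=0.01, uint8 rounding and video compression drown the signal, so a
    # much larger alpha would be needed for usable recovery in practice.
    def _dct_extract_sketch(self, cover_frame, stego_frame, alpha=0.01):
        cover_y = cv2.cvtColor(cover_frame, cv2.COLOR_BGR2YUV)[:, :, 0].astype(np.float32)
        stego_y = cv2.cvtColor(stego_frame, cv2.COLOR_BGR2YUV)[:, :, 0].astype(np.float32)
        rows, cols = cover_y.shape[0] // 8, cover_y.shape[1] // 8
        secret = np.zeros((rows, cols), dtype=np.float32)
        for i in range(rows):
            for j in range(cols):
                c_block = cover_y[i*8:(i+1)*8, j*8:(j+1)*8]
                s_block = stego_y[i*8:(i+1)*8, j*8:(j+1)*8]
                c_dct = dct(dct(c_block.T, norm='ortho').T, norm='ortho')
                s_dct = dct(dct(s_block.T, norm='ortho').T, norm='ortho')
                # The coefficient difference, divided by alpha, approximates
                # the downscaled secret luma value.
                secret[i, j] = (s_dct[4, 4] - c_dct[4, 4]) / alpha
        return np.clip(secret, 0, 255).astype(np.uint8)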
def embed_frames(self, a_frames_dir, b_frames_dir, video_info):
stego_frames_dir = "temp_stego_frames"
if os.path.exists(stego_frames_dir):
shutil.rmtree(stego_frames_dir)
os.makedirs(stego_frames_dir)
a_frames = sorted([f for f in os.listdir(a_frames_dir) if f.endswith('.png')])
b_frames = sorted([f for f in os.listdir(b_frames_dir) if f.endswith('.png')])
total_frames = min(len(a_frames), len(b_frames))
for i in range(total_frames):
a_frame_path = os.path.join(a_frames_dir, a_frames[i])
b_frame_path = os.path.join(b_frames_dir, b_frames[i])
a_frame = cv2.imread(a_frame_path)
b_frame = cv2.imread(b_frame_path)
            # Hide the B frame inside the A frame via DCT embedding
            stego_frame = self.dct_embed(a_frame, b_frame)
            # Save the stego frame
            cv2.imwrite(os.path.join(stego_frames_dir, f"frame_{i:06d}.png"), stego_frame)
            if i % 30 == 0:  # roughly once per second at 30 fps
self.progress_signal.emit(50 + int(25 * i / total_frames))
return stego_frames_dir
def create_final_video(self, frames_dir, audio_path, output_path, video_info):
        # Collect the frame list
        frames = sorted([f for f in os.listdir(frames_dir) if f.endswith('.png')])
        # Create the video writer ('avc1' requires an OpenCV build with H.264 support)
        first_frame = cv2.imread(os.path.join(frames_dir, frames[0]))
        height, width = first_frame.shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*'avc1')
        out = cv2.VideoWriter(output_path, fourcc, 30, (width, height))
        # Write all frames
        for frame_name in frames:
            frame = cv2.imread(os.path.join(frames_dir, frame_name))
            out.write(frame)
        out.release()
        # Mux the audio back in with FFmpeg
cmd = [
'ffmpeg', '-y',
'-i', output_path,
'-i', audio_path,
'-c:v', 'copy',
'-c:a', 'aac',
'-b:a', '96k',
'-strict', 'experimental',
output_path + '_with_audio.mp4'
]
subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Replace the original file
os.remove(output_path)
os.rename(output_path + '_with_audio.mp4', output_path)
def add_metadata(self, video_path):
        # Pick random metadata values
dates = ['2023:06:15 12:30:45', '2023:07:20 14:25:30', '2023:08:10 09:15:22']
locations = ['Beijing', 'Shanghai', 'Guangzhou', 'Shenzhen']
devices = ['iPhone 14 Pro', 'HUAWEI P60', 'Xiaomi 13', 'Canon EOS R5']
date = random.choice(dates)
location = random.choice(locations)
device = random.choice(devices)
        # Add the metadata with FFmpeg
cmd = [
'ffmpeg', '-y',
'-i', video_path,
'-metadata', f'creation_time={date}',
'-metadata', f'location={location}',
'-metadata', f'model={device}',
'-c', 'copy',
video_path + '_with_metadata.mp4'
]
subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Replace the original file
os.remove(video_path)
os.rename(video_path + '_with_metadata.mp4', video_path)
def cleanup(self, paths):
for path in paths:
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
        self.setWindowTitle('Video Steganography Processor')
self.setGeometry(100, 100, 600, 500)
central_widget = QWidget()
self.setCentralWidget(central_widget)
layout = QVBoxLayout()
        # Video A selection row
        a_video_layout = QHBoxLayout()
        self.a_video_label = QLabel('Video A path:')
        self.a_video_path = QLineEdit()
        self.a_video_browse = QPushButton('Browse')
        self.a_video_browse.clicked.connect(self.browse_a_video)
        a_video_layout.addWidget(self.a_video_label)
        a_video_layout.addWidget(self.a_video_path)
        a_video_layout.addWidget(self.a_video_browse)
        layout.addLayout(a_video_layout)
        # Number of videos to produce
        num_layout = QHBoxLayout()
        self.num_label = QLabel('Number of videos:')
self.num_input = QLineEdit('1')
num_layout.addWidget(self.num_label)
num_layout.addWidget(self.num_input)
layout.addLayout(num_layout)
        # Progress bar
        self.progress_bar = QProgressBar()
        layout.addWidget(self.progress_bar)
        # Log display
        self.log_display = QTextEdit()
        self.log_display.setReadOnly(True)
        layout.addWidget(self.log_display)
        # Start button
        self.start_button = QPushButton('Start')
self.start_button.clicked.connect(self.start_processing)
layout.addWidget(self.start_button)
central_widget.setLayout(layout)
def browse_a_video(self):
file_path, _ = QFileDialog.getOpenFileName(
            self, 'Select video A', '', 'Video files (*.mp4 *.avi *.mov *.mkv)')
if file_path:
self.a_video_path.setText(file_path)
def log_message(self, message):
self.log_display.append(message)
def start_processing(self):
a_video_path = self.a_video_path.text()
if not os.path.exists(a_video_path):
            QMessageBox.warning(self, 'Error', 'Please select a valid video A file')
return
try:
num_videos = int(self.num_input.text())
video_dir = "video"
if not os.path.exists(video_dir):
os.makedirs(video_dir)
                QMessageBox.warning(self, 'Error', 'The video folder did not exist; an empty one has been created')
return
b_videos = [f for f in os.listdir(video_dir)
if f.lower().endswith(('.mp4', '.avi', '.mov', '.mkv'))]
if len(b_videos) < num_videos:
QMessageBox.warning(
                    self, 'Error',
                    f'The video folder contains only {len(b_videos)} video(s), but {num_videos} are required'
)
return
except ValueError:
            QMessageBox.warning(self, 'Error', 'Please enter a valid number')
return
self.start_button.setEnabled(False)
self.progress_bar.setValue(0)
self.log_display.clear()
self.processor = VideoProcessor(a_video_path, "daifa", num_videos)
self.processor.progress_signal.connect(self.progress_bar.setValue)
self.processor.log_signal.connect(self.log_message)
self.processor.finished_signal.connect(self.processing_finished)
self.processor.start()
def processing_finished(self, success):
self.start_button.setEnabled(True)
if success:
            QMessageBox.information(self, 'Done', 'Video processing finished')
        else:
            QMessageBox.warning(self, 'Error', 'An error occurred during video processing')
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())