Today we use FFmpeg's filter functionality to merge four videos into one, in a left-one, right-three layout, as shown below:

Readers should first be familiar with the filter description string; see my earlier post:
ffmpeg利用滤镜进行视频混合(命令行) (mixing video with FFmpeg filters on the command line)
All four input files are 1920x1080, one minute long, with a frame rate of 10 fps.
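To make the layout concrete, here is a minimal sketch of what a left-one, right-three filter description could look like for a 2560x1080 output. This is only an illustrative guess, not the string actually used by the project: it assumes the left input keeps its 1920x1080 size and that the three right inputs are each scaled to 640x360 and stacked vertically; the in0-in3/out labels follow the naming used later in StartMerge.
const char* filter_desc =
    "[in0]pad=2560:1080[base];"          // left video placed top-left on a 2560x1080 canvas
    "[in1]scale=640:360[r1];"            // right column, top
    "[in2]scale=640:360[r2];"            // right column, middle
    "[in3]scale=640:360[r3];"            // right column, bottom
    "[base][r1]overlay=1920:0[t1];"
    "[t1][r2]overlay=1920:360[t2];"
    "[t2][r3]overlay=1920:720[out]";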
The calling code in main is shown below:
int main()
{
    CVideoMerge cVideoMerge;
    const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-vs.mp4";
    const char *pFileB = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-e.mp4";
    const char *pFileC = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
    const char *pFileD = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-zhuomian.mp4";
    const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\out-merge.mp4";
    cVideoMerge.StartMerge(pFileA, pFileB, pFileC, pFileD, pFileOut);
    cVideoMerge.WaitFinish();
    return 0;
}
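The body of WaitFinish is not shown in this post; given the thread handles declared in the class, a minimal sketch (my own assumption, not the project's code) would simply block until the merge thread exits:
int CVideoMerge::WaitFinish()
{
    // Wait for the merge thread; the read threads finish earlier once their files are drained.
    if (m_hVideoMergeThread != NULL)
    {
        WaitForSingleObject(m_hVideoMergeThread, INFINITE);
        CloseHandle(m_hVideoMergeThread);
        m_hVideoMergeThread = NULL;
    }
    return 0;
}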
Part of the StartMerge code is shown below:
// Allocate FIFO buffers large enough to hold 30 frames each
m_pVideoAFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
m_pVideoBFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
m_pVideoCFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
m_pVideoDFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
m_hVideoAReadThread = CreateThread(NULL, 0, VideoAReadProc, this, 0, NULL);
m_hVideoBReadThread = CreateThread(NULL, 0, VideoBReadProc, this, 0, NULL);
m_hVideoCReadThread = CreateThread(NULL, 0, VideoCReadProc, this, 0, NULL);
m_hVideoDReadThread = CreateThread(NULL, 0, VideoDReadProc, this, 0, NULL);
m_hVideoMergeThread = CreateThread(NULL, 0, VideoMergeProc, this, 0, NULL);
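The excerpt uses m_iYuv420FrameSize without showing how it is set. A typical way to compute the byte size of one 1920x1080 YUV420P frame (an assumption, not code taken from the article) is:
// width * height * 3 / 2 bytes for YUV420P; align = 1 means tightly packed planes
m_iYuv420FrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, 1920, 1080, 1);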
The StartMerge code above creates four queues and five threads. The threads m_hVideoAReadThread through m_hVideoDReadThread read and decode the local video files and write the decoded frames into the queues m_pVideoAFifo through m_pVideoDFifo, respectively.
m_hVideoMergeThread then takes one frame from each of the four queues, feeds them into the filter graph to be merged, and encodes the merged output.
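To make that flow concrete, here is a minimal sketch of such a merge loop (my own simplified illustration, not the project's actual VideoMerge body: only input A is spelled out, 1920x1080 YUV420P frames and a simple frame-counter timestamp are assumed, and error handling plus the encode/mux step are omitted):
void CVideoMerge::VideoMerge()
{
    // One reusable buffer/frame per input (only A shown; B, C, D are analogous).
    uint8_t *pBufA = (uint8_t *)av_malloc(m_iYuv420FrameSize);
    AVFrame *pFrameA = av_frame_alloc();
    av_image_fill_arrays(pFrameA->data, pFrameA->linesize, pBufA,
                         AV_PIX_FMT_YUV420P, 1920, 1080, 1);
    pFrameA->width = 1920;
    pFrameA->height = 1080;
    pFrameA->format = AV_PIX_FMT_YUV420P;

    AVFrame *pFrameOut = av_frame_alloc();
    int64_t iFrameIndex = 0;

    while (true)  // exit condition (all inputs finished) omitted in this sketch
    {
        // Wait until every FIFO holds at least one complete frame.
        if (av_fifo_size(m_pVideoAFifo) < m_iYuv420FrameSize /* || B, C, D likewise */)
        {
            Sleep(10);
            continue;
        }

        EnterCriticalSection(&m_csVideoASection);
        av_fifo_generic_read(m_pVideoAFifo, pBufA, m_iYuv420FrameSize, NULL);
        LeaveCriticalSection(&m_csVideoASection);
        // ... read one frame each from the B, C and D FIFOs in the same way ...

        // Give all four frames the same timestamp so the overlay filter can align them.
        pFrameA->pts = iFrameIndex;

        // Push the inputs into their buffer sources and pull the merged 2560x1080 frame.
        av_buffersrc_write_frame(m_pFilterCtxSrcVideoA, pFrameA);
        // ... av_buffersrc_write_frame for B, C and D ...
        if (av_buffersink_get_frame(m_pFilterCtxSink, pFrameOut) >= 0)
        {
            // Encode pFrameOut and write the packet to m_pFormatCtx_Out (omitted).
            av_frame_unref(pFrameOut);
        }
        iFrameIndex++;
    }
}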
The code structure of the whole project is as follows:

The contents of the three source files are given below.
The content of FfmpegMerge4File.cpp is as follows:
#include <iostream>
#include "VideoMerge.h"
#ifdef __cplusplus
extern "C"
{
#endif
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
int main()
{
    CVideoMerge cVideoMerge;
    const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-vs.mp4";
    const char *pFileB = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-e.mp4";
    const char *pFileC = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
    const char *pFileD = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-zhuomian.mp4";
    const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\out-merge.mp4";
    cVideoMerge.StartMerge(pFileA, pFileB, pFileC, pFileD, pFileOut);
    cVideoMerge.WaitFinish();
    return 0;
}
The content of VideoMerge.h is as follows:
#pragma once
#include <Windows.h>
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#ifdef __cplusplus
};
#endif
class CVideoMerge
{
public:
    CVideoMerge();
    ~CVideoMerge();
public:
    int StartMerge(const char *pFileA, const char *pFileB, const char *pFileC, const char *pFileD, const char *pFileOut);
    int WaitFinish();
private:
    int OpenFileA(const char *pFileA);
    int OpenFileB(const char *pFileB);
    int OpenFileC(const char *pFileC);
    int OpenFileD(const char *pFileD);
    int OpenOutPut(const char *pFileOut);
    int InitFilter(const char* filter_desc);
private:
    static DWORD WINAPI VideoAReadProc(LPVOID lpParam);
    void VideoARead();
    static DWORD WINAPI VideoBReadProc(LPVOID lpParam);
    void VideoBRead();
    static DWORD WINAPI VideoCReadProc(LPVOID lpParam);
    void VideoCRead();
    static DWORD WINAPI VideoDReadProc(LPVOID lpParam);
    void VideoDRead();
    static DWORD WINAPI VideoMergeProc(LPVOID lpParam);
    void VideoMerge();
private:
    AVFormatContext *m_pFormatCtx_FileA = NULL;
    AVFormatContext *m_pFormatCtx_FileB = NULL;
    AVFormatContext *m_pFormatCtx_FileC = NULL;
    AVFormatContext *m_pFormatCtx_FileD = NULL;
    AVCodecContext *m_pReadCodecCtx_VideoA = NULL;
    AVCodec *m_pReadCodec_VideoA = NULL;
    AVCodecContext *m_pReadCodecCtx_VideoB = NULL;
    AVCodec *m_pReadCodec_VideoB = NULL;
    AVCodecContext *m_pReadCodecCtx_VideoC = NULL;
    AVCodec *m_pReadCodec_VideoC = NULL;
    AVCodecContext *m_pReadCodecCtx_VideoD = NULL;
    AVCodec *m_pReadCodec_VideoD = NULL;
    AVCodecContext *m_pCodecEncodeCtx_Video = NULL;
    AVFormatContext *m_pFormatCtx_Out = NULL;
    AVFifoBuffer *m_pVideoAFifo = NULL;
    AVFifoBuffer *m_pVideoBFifo = NULL;
    AVFifoBuffer *m_pVideoCFifo = NULL;
    AVFifoBuffer *m_pVideoDFifo = NULL;
    AVFilterGraph* m_pFilterGraph = NULL;
    AVFilterContext* m_pFilterCtxSrcVideoA = NULL;
    AVFilterContext* m_pFilterCtxSrcVideoB = NULL;
    AVFilterContext* m_pFilterCtxSrcVideoC = NULL;
    AVFilterContext* m_pFilterCtxSrcVideoD = NULL;
    AVFilterContext* m_pFilterCtxSink = NULL;
    int m_iMergeWidth = 2560;
    int m_iMergeHeight = 1080;
    int m_iYuv420FrameSize = 0;
private:
    CRITICAL_SECTION m_csVideoASection;
    CRITICAL_SECTION m_csVideoBSection;
    CRITICAL_SECTION m_csVideoCSection;
    CRITICAL_SECTION m_csVideoDSection;
    HANDLE m_hVideoAReadThread = NULL;
    HANDLE m_hVideoBReadThread = NULL;
    HANDLE m_hVideoCReadThread = NULL;
    HANDLE m_hVideoDReadThread = NULL;
    HANDLE m_hVideoMergeThread = NULL;
};
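The class declares InitFilter(const char* filter_desc), whose body is not part of the excerpts in this post. As a rough, hedged sketch of how such a four-input graph is usually wired up with libavfilter (error handling trimmed; the 1920x1080/10 fps buffer-source arguments and the in0-in3/out pad names are assumptions based on the inputs described above), it might look like this:
int CVideoMerge::InitFilter(const char *filter_desc)
{
    // All four inputs are assumed to be 1920x1080 YUV420P at 10 fps (time_base 1/10).
    const char *args = "video_size=1920x1080:pix_fmt=yuv420p:time_base=1/10:pixel_aspect=1/1";

    m_pFilterGraph = avfilter_graph_alloc();

    const AVFilter *pBufferSrc  = avfilter_get_by_name("buffer");
    const AVFilter *pBufferSink = avfilter_get_by_name("buffersink");

    // One buffer source per input; the names must match the labels in filter_desc.
    avfilter_graph_create_filter(&m_pFilterCtxSrcVideoA, pBufferSrc, "in0", args, NULL, m_pFilterGraph);
    avfilter_graph_create_filter(&m_pFilterCtxSrcVideoB, pBufferSrc, "in1", args, NULL, m_pFilterGraph);
    avfilter_graph_create_filter(&m_pFilterCtxSrcVideoC, pBufferSrc, "in2", args, NULL, m_pFilterGraph);
    avfilter_graph_create_filter(&m_pFilterCtxSrcVideoD, pBufferSrc, "in3", args, NULL, m_pFilterGraph);
    avfilter_graph_create_filter(&m_pFilterCtxSink, pBufferSink, "out", NULL, NULL, m_pFilterGraph);

    // Connect our buffer sources to the graph's labelled inputs and the sink to its output.
    AVFilterInOut *pOutA = avfilter_inout_alloc();
    AVFilterInOut *pOutB = avfilter_inout_alloc();
    AVFilterInOut *pOutC = avfilter_inout_alloc();
    AVFilterInOut *pOutD = avfilter_inout_alloc();
    AVFilterInOut *pIn   = avfilter_inout_alloc();

    pOutA->name = av_strdup("in0"); pOutA->filter_ctx = m_pFilterCtxSrcVideoA; pOutA->pad_idx = 0; pOutA->next = pOutB;
    pOutB->name = av_strdup("in1"); pOutB->filter_ctx = m_pFilterCtxSrcVideoB; pOutB->pad_idx = 0; pOutB->next = pOutC;
    pOutC->name = av_strdup("in2"); pOutC->filter_ctx = m_pFilterCtxSrcVideoC; pOutC->pad_idx = 0; pOutC->next = pOutD;
    pOutD->name = av_strdup("in3"); pOutD->filter_ctx = m_pFilterCtxSrcVideoD; pOutD->pad_idx = 0; pOutD->next = NULL;
    pIn->name   = av_strdup("out"); pIn->filter_ctx   = m_pFilterCtxSink;      pIn->pad_idx   = 0; pIn->next   = NULL;

    int ret = avfilter_graph_parse_ptr(m_pFilterGraph, filter_desc, &pIn, &pOutA, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(m_pFilterGraph, NULL);

    avfilter_inout_free(&pIn);
    avfilter_inout_free(&pOutA);
    return ret < 0 ? -1 : 0;
}
The key point is that the number of buffer sources and their names must match the input labels used in the filter description string, otherwise avfilter_graph_parse_ptr cannot connect the graph.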
The content of VideoMerge.cpp is as follows:
#include "VideoMerge.h"
#include "log/log.h"
CVideoMerge::CVideoMerge()
{
    InitializeCriticalSection(&m_csVideoASection);
    InitializeCriticalSection(&m_csVideoBSection);
    InitializeCriticalSection(&m_csVideoCSection);
    InitializeCriticalSection(&m_csVideoDSection);
}
CVideoMerge::~CVideoMerge()
{
    DeleteCriticalSection(&m_csVideoASection);
    DeleteCriticalSection(&m_csVideoBSection);
    DeleteCriticalSection(&m_csVideoCSection);
    DeleteCriticalSection(&m_csVideoDSection);
}
int CVideoMerge::StartMerge(const char *pFileA, const char *pFileB, const char *pFileC, const char *pFileD, const char *pFileOut)
{
    int ret = -1;
    do
    {
        ret = OpenFileA(pFileA);
        if (ret != 0)
        {
            break;
        }
        ret = OpenFileB(pFileB);
        if (ret != 0)
        {
            break;
        }
        ret = OpenFileC(pFileC);
        if (ret != 0)
        {
            break;
        }
        ret = OpenFileD(pFileD);
        if (ret != 0)
        {
            break;
        }
        ret = OpenOutPut(pFileOut);
        if (ret != 0)
        {
            break;
        }
        /// This filter uses the first video as the base layer, stretched to fill the whole frame, and overlays the second video on its right half
        //const char* filter_desc = "[in0]pad=1920:1080[x1];[in1]scale=w=960:h=1080[inn1];[x1][inn1]overlay=960:0[out]";
        const char
