Download
The Dev package contains the corresponding header files and .lib import libraries.
The Shared package contains the .exe and .dll files.
Create a VS project
Configuration
1. Add the ffmpeg and SDL header paths to [VC++ Directories] -> [Include Directories];
2. Add the ffmpeg and SDL .lib paths to [VC++ Directories] -> [Library Directories];
3. Add the ffmpeg and SDL .lib files to [Linker] -> [Input] -> [Additional Dependencies]. Without step 3, the build fails with "LNK2019: unresolved external symbol" errors; an alternative using #pragma comment is sketched below.
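As an alternative to step 3, the .lib files can also be pulled in from source with #pragma comment(lib, ...). A minimal sketch follows; the library names are assumptions based on a typical FFmpeg Dev package and the SDL2 development package, so adjust them to whatever files your downloads actually contain.
// MSVC only: link the import libraries directly from source.
// Library names assumed from a typical FFmpeg Dev / SDL2 development package.
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "SDL2.lib")
#pragma comment(lib, "SDL2main.lib")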
Source code
Reference: 最简单的基于FFMPEG+SDL的视频播放器 ver2 (采用SDL2.0)
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <SDL.h>
#ifdef __cplusplus
}
#endif
#define OUTPUT_YUV420P 0
#define filename "D:/Workspaces/Git/FfmpegTest/ffmpegTest/bin/chujunjun.avi"
#define outfilename "./output.yuv"
int main(int argc, char **argv)
{
/*
AVFormatContext: holds the format information of the input file, such as the number of streams and the streams themselves
AVCodecContext: holds the detailed codec information of a stream, such as the video width, height, and codec type
AVCodec: the actual codec, containing the functions called for encoding/decoding
AVFrame: data structure holding one frame; the two frames here store the image before and after color conversion
AVPacket: audio/video data read while demuxing the file is stored in a packet
*/
AVFormatContext *pFormatCtx;
int i, videoStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameYUV;
uint8_t *buffer;
int numBytes;
SDL_Window *screen;
SDL_Renderer *sdlRender;
SDL_Texture *sdlTexture;
SDL_Rect sdlRect;
int frameFinished;
AVPacket packet;
i = 0;
struct SwsContext *img_convert_ctx;
int err_code;
char buf[1024];
FILE *fp_yuv;
int y_size;
//Register all supported container formats and codecs
av_register_all();
pFormatCtx = avformat_alloc_context();
//Open the input file and read its header information
err_code = avformat_open_input(&pFormatCtx, filename, NULL, NULL);
if (err_code != 0)
{
av_strerror(err_code, buf, 1024);
printf("coundn't open the file!,error code = %d(%s)\n", err_code, buf);
return -1;
}
//Retrieve stream information; pFormatCtx->streams points to the streams in the file
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
printf("Couldn't find stream information.\n");
return -1;
}
//Loop over the streams to find the first video stream
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStream = i;
break;
}
}
if (videoStream == -1)
{
printf("Didn't find a video stream.\n");
return -1;
}
//Find and open the decoder that matches the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
fprintf(stderr, "Unsupported codec !\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("cann't open the codec!\n");
return -1;
}
//Allocate the two frames: pFrame stores the decoded image,
//and pFrameYUV stores the image after conversion to YUV420P
pFrame = av_frame_alloc();
if (pFrame == NULL)
{
printf("pFrame alloc fail!\n");
return -1;
}
pFrameYUV = av_frame_alloc();
if (pFrameYUV == NULL)
{
printf("pFrameYUV alloc fail!\n");
return -1;
}
//Compute the buffer size of a YUV420P image with the source width and height
numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
pCodecCtx->height, 1);
buffer = (uint8_t*)av_mallocz(numBytes * sizeof(uint8_t));
if (!buffer)
{
printf("numBytes :%d , buffer malloc 's mem \n", numBytes);
return -1;
}
printf("--------------- File Information ----------------\n");
av_dump_format(pFormatCtx, 0, filename, 0);
printf("-------------------------------------------------\n");
//Associate pFrameYUV's data planes with buffer
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffer,
AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P,
SWS_BICUBIC,
NULL, NULL, NULL);
if (img_convert_ctx == NULL)
{
fprintf(stderr, "Cannot initialize the conversion context!\n");
return -1;
}
#if OUTPUT_YUV420P
fp_yuv = fopen(outfilename, "wb+");
#endif
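//Initialize SDL (video, audio, timer) and create the window, renderer,
//and texture that will display the decoded frames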
if (SDL_Init(SDL_INIT_AUDIO | SDL_INIT_TIMER | SDL_INIT_VIDEO))
{
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
screen = SDL_CreateWindow("Simplest ffmpeg player's Window",
SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, pCodecCtx->width,
pCodecCtx->height, SDL_WINDOW_OPENGL);
if (!screen)
{
fprintf(stderr, "SDL: could not create window - exiting - %s\n", SDL_GetError());
return -1;
}
sdlRender = SDL_CreateRenderer(screen, -1, 0);
if (!sdlRender)
{
fprintf(stderr, "SDL:cound not create render : %s\n", SDL_GetError());
return -1;
}
sdlTexture = SDL_CreateTexture(sdlRender, SDL_PIXELFORMAT_IYUV,
SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
if (!sdlTexture)
{
fprintf(stderr, "SDL:cound not create Texture : %s\n", SDL_GetError());
return -1;
}
sdlRect.x = 0;
sdlRect.y = 0;
sdlRect.w = pCodecCtx->width;
sdlRect.h = pCodecCtx->height;
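//Main loop: read packets, decode the video ones, convert the decoded frame
//to YUV420P, and hand it to SDL for rendering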
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
if (avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet) < 0)
{
printf("Decode Error!\n");
return -1;
}
if (frameFinished)
{
//Convert the decoded frame to YUV420P
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
pCodecCtx->height,
pFrameYUV->data,
pFrameYUV->linesize);
#if OUTPUT_YUV420P
y_size = pCodecCtx->width * pCodecCtx->height;
fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
#if 0
SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
SDL_UpdateYUVTexture(sdlTexture, &sdlRect, pFrameYUV->data[0],
pFrameYUV->linesize[0], pFrameYUV->data[1],
pFrameYUV->linesize[1], pFrameYUV->data[2],
pFrameYUV->linesize[2]);
#endif
SDL_RenderClear(sdlRender);
SDL_RenderCopy(sdlRender, sdlTexture, NULL, &sdlRect);
SDL_RenderPresent(sdlRender);
SDL_Delay(40);//crude pacing: ~40 ms per frame (about 25 fps)
}
}
av_free_packet(&packet);//free the packet read by av_read_frame
}
#if OUTPUT_YUV420P
fclose(fp_yuv);
#endif
sws_freeContext(img_convert_ctx);
SDL_Quit();
av_free(buffer);
av_frame_free(&pFrame);
av_frame_free(&pFrameYUV);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return EXIT_SUCCESS;
}
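The listing above uses FFmpeg's old decoding API (stream->codec, avcodec_decode_video2, av_free_packet), which is deprecated and has been removed from recent FFmpeg releases. The sketch below shows the equivalent decode loop with the send/receive API; it is an outline under stated assumptions, not a drop-in replacement: it presumes pCodecCtx was allocated with avcodec_alloc_context3() and filled from the stream's codecpar via avcodec_parameters_to_context().
//Decode loop rewritten for the send/receive API (FFmpeg 3.1 and later).
//Assumes pCodecCtx came from avcodec_alloc_context3() + avcodec_parameters_to_context().
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
if (avcodec_send_packet(pCodecCtx, &packet) == 0)
{
//A single packet may produce zero or more decoded frames
while (avcodec_receive_frame(pCodecCtx, pFrame) == 0)
{
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
//...render pFrameYUV with SDL exactly as in the listing above...
}
}
}
av_packet_unref(&packet);//replaces the deprecated av_free_packet()
}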