之前在看别人代码的时候发现,有的博主在对摄像头采集的时候会先使用avcodec_decode_video2进行解码,让我很是不解,后来才知道是因为摄像头的不同,采集出来的数据有可能是经过了内部编码的数据,也有可能就是原始yuv数据,所以我这里做一个测试,看看如果是yuv数据的话,使用avcodec_decode_video2解码会不会有什么影响。结果证明是不会有影响的。直接上代码。
代码参考了这位博主的文章:https://blog.csdn.net/li_wen01/article/details/62226563
/* C standard library */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string.h>
#include <time.h>

/* POSIX */
#include <sys/time.h>
#include <unistd.h>

/* FFmpeg */
#include <libavcodec/avcodec.h>
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h> /* avdevice_register_all(), v4l2 input format — was missing */
#include <libavformat/avformat.h>
#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libavutil/mathematics.h>
#include <libavutil/mem.h>
#include <libavutil/time.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>

/* SDL */
#include <SDL.h>
#include <SDL_thread.h>
/*
 * Capture configuration.
 * const-qualified: these point at string literals, and writing through a
 * non-const char* to a literal is undefined behavior (the original used
 * plain char*).  They are only ever read by captureOneFrame().
 */
const char *input_name = "video4linux2"; /* libavdevice input-format name */
const char *file_name  = "/dev/video0";  /* camera device node to open */
const char *out_file   = "yuv420.yuv";   /* raw YUV420P output file */
/*
 * Open the V4L2 camera named by the globals input_name/file_name, decode up
 * to 200 packets (avcodec_decode_video2 passes raw YUV through unchanged
 * when the "codec" is rawvideo), convert each decoded frame to YUV420P with
 * libswscale and append the three planes to out_file.
 *
 * Fixes over the original:
 *  - fopen() result was compared with `< 0` (FILE* vs int) instead of NULL;
 *  - `return -1;` appeared twice inside this void function;
 *  - avcodec_find_decoder() and sws_getContext() results were unchecked;
 *  - av_read_frame() return was ignored, so EOF/errors reused a stale packet;
 *  - out_buffer/packet were leaked, and early-error paths leaked fp/fmtCtx;
 *  - a dozen unused locals removed.
 * Uses the deprecated FFmpeg 3.x API (streams[i]->codec, avpicture_*,
 * avcodec_decode_video2) to stay consistent with the rest of the file.
 */
void captureOneFrame(void){
    AVFormatContext *fmtCtx = NULL;
    AVInputFormat *inputFmt = NULL;
    AVPacket *packet = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    struct SwsContext *sws_ctx = NULL;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameYUV = NULL;
    uint8_t *out_buffer = NULL;
    FILE *fp = NULL;
    int i;
    int videoindex = -1;

    fp = fopen(out_file, "wb");
    if (fp == NULL) { /* BUG FIX: original tested `fp < 0` on a FILE* */
        printf("open frame data file failed\n");
        return;
    }

    inputFmt = av_find_input_format(input_name);
    if (inputFmt == NULL) {
        printf("can not find_input_format\n");
        goto cleanup;
    }

    if (avformat_open_input(&fmtCtx, file_name, inputFmt, NULL) < 0) {
        printf("can not open_input_file\n");
        goto cleanup;
    }
    av_dump_format(fmtCtx, 0, file_name, 0);

    /* Locate the first video stream. */
    for (i = 0; i < (int)fmtCtx->nb_streams; i++) {
        if (fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        goto cleanup; /* BUG FIX: was `return -1;` in a void function */
    }

    pCodecCtx = fmtCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) { /* was unchecked: avcodec_open2(NULL) would fail obscurely */
        printf("Codec not found.\n");
        goto cleanup;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        goto cleanup; /* BUG FIX: was `return -1;` in a void function */
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (uint8_t *)av_malloc(
        avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    if (!pFrame || !pFrameYUV || !out_buffer || !packet) {
        printf("out of memory\n");
        goto cleanup;
    }
    av_init_packet(packet);
    /* Point pFrameYUV's data/linesize at out_buffer as a YUV420P picture. */
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
                   pCodecCtx->width, pCodecCtx->height);

    printf("picture width  = %d \n", pCodecCtx->width);
    printf("picture height = %d \n", pCodecCtx->height);
    printf("Pixel Format   = %d \n", pCodecCtx->pix_fmt);

    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                             SWS_BICUBIC, NULL, NULL, NULL);
    if (sws_ctx == NULL) { /* was unchecked */
        printf("can not create sws context\n");
        goto cleanup;
    }

    int loop = 200;
    int got_picture = 0;
    while (loop--) {
        /* BUG FIX: stop on EOF/error instead of reusing a stale packet. */
        if (av_read_frame(fmtCtx, packet) < 0)
            break;
        if (packet->stream_index == videoindex) {
            /* For rawvideo cameras this is effectively a pass-through. */
            if (avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet) >= 0
                && got_picture) {
                sws_scale(sws_ctx, (const uint8_t *const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);
                /* Y plane, then quarter-size U and V planes. */
                fwrite(pFrameYUV->data[0], 1, pCodecCtx->width * pCodecCtx->height, fp);
                fwrite(pFrameYUV->data[1], 1, pCodecCtx->width * pCodecCtx->height / 4, fp);
                fwrite(pFrameYUV->data[2], 1, pCodecCtx->width * pCodecCtx->height / 4, fp);
            }
        }
        av_free_packet(packet);
    }

cleanup:
    /* Free the frames before out_buffer: pFrameYUV->data aliases it. */
    if (sws_ctx)
        sws_freeContext(sws_ctx);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    av_free(out_buffer); /* BUG FIX: was leaked */
    av_free(packet);     /* BUG FIX: was leaked */
    if (pCodecCtx)
        avcodec_close(pCodecCtx);
    if (fmtCtx)
        avformat_close_input(&fmtCtx);
    fclose(fp);
}
/*
 * Entry point: register FFmpeg components, then run one capture session.
 * Registration order is irrelevant; both must happen before any lookup
 * performed inside captureOneFrame().
 */
int main(void){
    avdevice_register_all(); /* makes the "video4linux2" input format findable */
    avcodec_register_all();  /* makes the camera stream's decoder findable */
    captureOneFrame();
    return 0;
}