FFMPEG+SDL Tutorial

The original FFMPEG+SDL tutorial targets a version that is now too old, so I rewrote its simple player against FFMPEG-0.5. Only tutorial1 and tutorial2 are rewritten here.

tutor1:

// arm-linux-gcc -o tutor1 tutor1.c -I/usr/local/ffmpeg/include -L/usr/local/ffmpeg/lib
// -lavdevice -lavutil -lavformat -lavcodec -lswscale -lamrnb -lamrwb -lfaac -lfaad -lmp3lame

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"

#include <stdio.h>

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);

int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameRGB;
    AVPacket packet;
    unsigned int i;
    int videoStream;
    int frameFinished;
    int numBytes;
    uint8_t *buffer;

    if (argc < 2) {
        printf("please provide a movie file\n");
        return -1;
    }

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0)
        return -1;

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1;

    // Dump information about the file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return -1;

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "unsupported codec\n");
        return -1;
    }

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1;

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure for the RGB picture
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

    /* Assign appropriate parts of buffer to image planes in pFrameRGB.
       Note that pFrameRGB is an AVFrame, but AVFrame is a superset of
       AVPicture. */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    static struct SwsContext *img_convert_ctx;
    if (img_convert_ctx == NULL) {
        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                         pCodecCtx->pix_fmt,
                                         pCodecCtx->width, pCodecCtx->height,
                                         PIX_FMT_RGB24,
                                         SWS_BICUBIC, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "cannot initialize the conversion context\n");
            return -1;
        }
    }

    // Read frames and save the first frame to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);

            // Did we get a complete video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pFrameRGB->data,
                          pFrameRGB->linesize);

                // Save the frame to disk
                if (++i <= 1)
                    SaveFrame(pFrameRGB, pCodecCtx->width,
                              pCodecCtx->height, i);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the scaler context
    sws_freeContext(img_convert_ctx);

    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    FILE *pFile;
    char szFilename[32];
    int y;

    // Open file
    sprintf(szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // Write the PPM header
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write pixel data, one row at a time, skipping the line padding
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    // Close file
    fclose(pFile);
}
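SaveFrame writes a binary PPM: a text header ("P6", the dimensions, the maximum sample value) followed by raw RGB24 triplets. If you want to confirm a run actually produced a well-formed file, a throwaway checker like the one below can parse the header back. This is only a sketch I am adding for illustration, not part of the original tutorial; the filename frame1.ppm simply matches what SaveFrame produces for the first frame.

// check_ppm.c -- hypothetical helper, not from the tutorial: verify
// that frame1.ppm written by SaveFrame() has a sane P6 header.
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("frame1.ppm", "rb");
    char magic[3] = {0};
    int w, h, maxval;

    if (f == NULL) {
        fprintf(stderr, "frame1.ppm not found\n");
        return -1;
    }
    // The header is plain text: "P6\n<width> <height>\n<maxval>\n"
    if (fscanf(f, "%2s %d %d %d", magic, &w, &h, &maxval) == 4)
        printf("magic=%s size=%dx%d maxval=%d\n", magic, w, h, maxval);
    else
        fprintf(stderr, "malformed header\n");
    fclose(f);
    return 0;
}

For the first frame of a 240x320 clip this should print "magic=P6 size=240x320 maxval=255"; anything else means the writer or the swscale conversion went wrong.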
tutor2:

// arm-linux-gcc -o tutor2 tutor2.c -I/usr/local/ffmpeg/include -L/usr/local/ffmpeg/lib
// -lavdevice -lavutil -lavformat -lavcodec -lswscale -lamrnb -lamrwb -lfaac -lfaad -lmp3lame -lSDL

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "/usr/local/sdl/include/SDL/SDL.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int frameFinished;
    SDL_Surface *screen;
    SDL_Overlay *bmp;
    SDL_Rect rect;
    SDL_Event event;

    if (argc < 2) {
        printf("please provide a movie file\n");
        return -1;
    }

    av_register_all();

    if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0)
        return -1;

    if (av_find_stream_info(pFormatCtx) < 0)
        return -1;

    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return -1;

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "unsupported codec\n");
        return -1;
    }

    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1;

    pFrame = avcodec_alloc_frame();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "cannot initialize SDL: %s\n", SDL_GetError());
        return -1;
    }

    // Set the video mode to match your actual screen resolution
    screen = SDL_SetVideoMode(240, 320, 0, 0);
    //screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    if (!screen) {
        fprintf(stderr, "cannot set video mode\n");
        return -1;
    }

    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    static struct SwsContext *img_convert_ctx;
    if (img_convert_ctx == NULL) {
        // The overlay wants a YUV420P picture, so convert from the
        // codec pixel format if needed
        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                         pCodecCtx->pix_fmt,
                                         pCodecCtx->width, pCodecCtx->height,
                                         PIX_FMT_YUV420P,
                                         SWS_BICUBIC, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "cannot initialize the conversion context\n");
            return -1;
        }
    }

    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);
            if (frameFinished) {
                SDL_LockYUVOverlay(bmp);

                /* SDL's YV12 overlay stores the V plane before the U
                   plane, while YUV420P is ordered Y-U-V, so planes 1
                   and 2 are swapped here. */
                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];
                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
                // Sleep(60);
            }
        }
        av_free_packet(&packet);

        if (SDL_PollEvent(&event)) {
            switch (event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
            }
        }
    }

    sws_freeContext(img_convert_ctx);
    SDL_Quit();
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);

    return 0;
}
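tutor2 displays frames as fast as they can be decoded; the commented-out Sleep(60) hints at the pacing that is still missing. As a stopgap, SDL's own SDL_Delay() can throttle the loop to a rough frame rate. The sketch below is my own assumption, not part of the tutorial: it guesses a fixed 25 fps instead of deriving the delay from the stream's real timestamps, and it ignores how long decoding itself takes. Proper PTS-based timing is beyond these two tutorials.

// pacing_sketch.c -- hypothetical, not from the tutorial: throttle a
// loop to roughly 25 fps with SDL_Delay(). Link with -lSDL; the SDL.h
// include path may differ on your setup.
#include "SDL.h"
#include <stdio.h>

int main(void)
{
    int frame;

    if (SDL_Init(SDL_INIT_TIMER)) {
        fprintf(stderr, "cannot initialize SDL: %s\n", SDL_GetError());
        return -1;
    }

    for (frame = 0; frame < 25; frame++) {
        // ...decode and display one frame here...
        SDL_Delay(1000 / 25);  // sleep one frame period (40 ms)
    }

    SDL_Quit();
    return 0;
}

Calling SDL_Delay(1000 / 25) right after SDL_DisplayYUVOverlay() in tutor2 would cap playback near 25 fps, but audio/video sync still needs real PTS handling.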