A while ago I had some spare time and took the opportunity to study FFmpeg. I extracted quite a bit of code from it and ported the parts needed to play 3gp (mp4) files, with H.264 video and AAC audio, to iOS. The notes are organized below; corrections are welcome.
Porting FFmpeg to iOS
Configuration under Linux:
--disable-yasm --disable-asm --disable-altivec --disable-amd3dnow --disable-amd3dnowext --disable-mmx --disable-mmxext --disable-sse --disable-sse2 --disable-sse3 --disable-ssse3 --disable-sse4 --disable-sse42 --disable-fma4 --disable-avx2 --disable-armv5te --disable-armv6 --disable-armv6t2 --disable-vfp --disable-neon --disable-vis --disable-inline-asm --disable-mips32r2 --disable-mipsdspr1 --disable-mipsdspr2 --disable-encoders --disable-muxers --disable-devices --enable-ffplay --disable-ffmpeg --disable-ffserver --disable-ffprobe --disable-demuxers --enable-demuxer=mov --disable-decoders --enable-decoder=aac --enable-decoder=h264 --disable-parsers --enable-dct --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-protocols --enable-protocol=file --enable-protocol=http --disable-indevs --disable-outdevs --enable-outdev=sdl --disable-filters --enable-filter=aresample
This configuration strips out all muxers; only the mp4/3gp demuxer is kept, video decoding supports only H.264 and audio decoding only AAC (raw H.264 and AAC files also play), and only the local-file and http protocols are enabled.
Only the audio resampling filter is kept, and all hardware-specific optimizations are removed.
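For reference, the actual iOS build then cross-compiles with these same feature flags; a minimal sketch, assuming Xcode's clang toolchain on the build machine with paths supplied by xcrun (armv7 is just an example architecture):
./configure --enable-cross-compile --target-os=darwin --arch=arm \
    --cc="xcrun -sdk iphoneos clang" \
    --sysroot="$(xcrun -sdk iphoneos --show-sdk-path)" \
    --extra-cflags="-arch armv7" --extra-ldflags="-arch armv7" \
    <the feature flags listed above>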
The port uses the SDL 1.2.5 open-source library, taking only its threading code and part of the YUV-to-bitmap conversion code. The image SDL produces after conversion is upside down, however, so the routine has to be modified as follows:
static void Color32DitherYV12Mod1X( int *colortab, Uint32 *rgb_2_pix,
                                    unsigned char *lum, unsigned char *cr,
                                    unsigned char *cb, unsigned char *out,
                                    int rows, int cols, int mod )
{
    unsigned int* row1;
    unsigned int* row2;
    unsigned char* lum2;
    int x, y;
    int cr_r;
    int crb_g;
    int cb_b;
    int cols_2 = cols / 2;
    int y_2;
    /* Start writing at the last output row and work upward,
       so the converted image comes out vertically flipped. */
    row1 = (unsigned int*) out + (rows - 1) * cols;
    row2 = row1 - cols + mod;
    lum2 = lum + cols;
    //mod += cols + mod;
    //y = rows / 2;
    y = 0;
    y_2 = rows >> 1;
    while( y++ < y_2 )
    {
        x = 0;
        while( x++ < cols_2 )
        {
            register int L;
            cr_r  = 0*768+256 + colortab[ *cr + 0*256 ];
            crb_g = 1*768+256 + colortab[ *cr + 1*256 ]
                              + colortab[ *cb + 2*256 ];
            cb_b  = 2*768+256 + colortab[ *cb + 3*256 ];
            ++cr; ++cb;
            L = *lum++;
            *row1++ = (rgb_2_pix[ L + cr_r ] |
                       rgb_2_pix[ L + crb_g ] |
                       rgb_2_pix[ L + cb_b ]);
            L = *lum++;
            *row1++ = (rgb_2_pix[ L + cr_r ] |
                       rgb_2_pix[ L + crb_g ] |
                       rgb_2_pix[ L + cb_b ]);
            /* Now, do second row. */
            L = *lum2++;
            *row2++ = (rgb_2_pix[ L + cr_r ] |
                       rgb_2_pix[ L + crb_g ] |
                       rgb_2_pix[ L + cb_b ]);
            L = *lum2++;
            *row2++ = (rgb_2_pix[ L + cr_r ] |
                       rgb_2_pix[ L + crb_g ] |
                       rgb_2_pix[ L + cb_b ]);
        }
        /*
         * These values are at the start of the next line, (due
         * to the ++'s above), but they need to be at the start
         * of the line after that.
         */
        lum += cols;
        lum2 += cols;
        /* Move both output rows up two lines instead of down. */
        row1 -= (cols << 1) + cols; //mod is 0
        row1 += mod;
        row2 -= (cols << 1) + cols;
        row2 += mod;
    }
}
Drawing the in-memory bitmap onto a screen view in iOS:
Creating the image:
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef offscreen = CGBitmapContextCreate(bmp,
                                               width,
                                               height,
                                               8,         // bits per component
                                               width * 4, // bytes per row
                                               colorSpace,
                                               kCGImageAlphaNoneSkipFirst);
// draw stuff into offscreen
CGImageRef image = CGBitmapContextCreateImage(offscreen);
CFRelease(offscreen);
The alpha setting kCGImageAlphaNoneSkipFirst does no color blending at all; note also that iOS only supports 32-bit bitmaps here. With kCGImageAlphaPremultipliedLast the picture comes out scrambled, and with kCGImageAlphaNoneSkipLast what should be red shows up as blue.
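A quick way to check the channel order (a sketch, assuming the default big-endian component order that kCGImageAlphaNoneSkipFirst implies: byte 0 skipped, then R, G, B) is to fill the buffer with solid red; it should render red rather than blue:
uint8_t *p = (uint8_t *)bmp;
for (int i = 0; i < width * height; i++, p += 4) {
    p[0] = 0x00; /* skipped byte */
    p[1] = 0xFF; /* R */
    p[2] = 0x00; /* G */
    p[3] = 0x00; /* B */
}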
Drawing the image:
CGContextRef context;
context = UIGraphicsGetCurrentContext();
CGContextSetInterpolationQuality(context, kCGInterpolationLow);
CGContextDrawImage(context, playRect, image);
Since the size of the image being drawn differs from the size of the destination rectangle, the lower layer has to scale the image, so the interpolation quality must be set first. iOS defaults to kCGInterpolationMedium which, I would guess, uses bicubic interpolation and is quite inefficient: tested on an iPhone 5 at fps 12 and 512x384, peak CPU usage reached 56%, while kCGInterpolationLow peaked at only 36% under the same conditions. From what I gather, kCGInterpolationLow uses bilinear interpolation. Alternatively, one could do the upscaling oneself and pick the lowest interpolation quality here, which might improve both execution efficiency and the look of the video playback; that approach has not been tested, though.
Replacing SDL's audio playback:
Open the audio device and set the playback parameters:
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
    AQCallbackStruct aqc = ((VideoState *)opaque)->aqc;
    UInt32 err, bufferSize;
    int i;
    /* describe the PCM fed to the AudioQueue: packed signed 16-bit samples */
    aqc.mDataFormat.mSampleRate = wanted_sample_rate;
    aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    aqc.mDataFormat.mFramesPerPacket = 1;
    aqc.mDataFormat.mChannelsPerFrame = wanted_nb_channels;
    aqc.mDataFormat.mBitsPerChannel = 16;
    aqc.mDataFormat.mBytesPerFrame = wanted_nb_channels * 2;
    aqc.mDataFormat.mBytesPerPacket = aqc.mDataFormat.mBytesPerFrame;
    aqc.frameCount = AUDIO_BUFFER_SIZE;
    if (audio_buf) {
        av_free(audio_buf);
    }
    audio_buf = (unsigned char*)av_malloc(aqc.mDataFormat.mBytesPerFrame * aqc.frameCount);
    if (!audio_buf)
        return -1;
    err = AudioQueueNewOutput(&aqc.mDataFormat, AQBufferCallback, opaque, NULL, kCFRunLoopCommonModes, 0, &aqc.queue);
    if (err)
    {
        return err;
    }
    bufferSize = aqc.frameCount * aqc.mDataFormat.mBytesPerFrame;
    VideoState* is = (VideoState*)opaque;
    /* report the negotiated format back to the ffplay side */
    audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
    audio_hw_params->freq = wanted_sample_rate; //spec.freq;
    audio_hw_params->channel_layout = wanted_channel_layout;
    audio_hw_params->channels = wanted_nb_channels; //spec.channels;
    memcpy(&is->aqc, &aqc, sizeof(AQCallbackStruct));
    /* allocate the queue buffers and prime each one so playback can start */
    for (i = 0; i < AUDIO_BUFFERS; i++) {
        err = AudioQueueAllocateBuffer(aqc.queue, bufferSize, &aqc.mBuffers[i]);
        if (err) {
            return err;
        }
        AQBufferCallback(is, aqc.queue, aqc.mBuffers[i]);
    }
    err = AudioQueueStart(aqc.queue, NULL);
    if (err) {
        return err;
    }
    /* return the hardware buffer size in bytes */
    return aqc.mDataFormat.mBytesPerFrame * aqc.frameCount;
}
The audio callback:
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = (VideoState *)opaque;
    int audio_size, len1;
    int bytes_per_sec;
    int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
    audio_callback_time = av_gettime();
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->silence_buf;
                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
            } else {
                if (is->show_mode != SHOW_MODE_VIDEO)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    if (!isnan(is->audio_clock)) {
        set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
}
This audio callback first decodes audio to obtain raw samples; if decoding fails it outputs silence, but the callback itself keeps running the whole time, and on iOS, if another application that plays audio is opened, the current program can no longer play audio. The data obtained from the audio decoder will not necessarily fill the audio buffer, so the loop keeps filling until the buffer is full before it is written to the audio device. Also, when no video is shown, that is, when playing aac, mp3 and the like, update_sample_display is used to draw the waveform or some other visualization. audio_buf_index tracks the current position in the buffer; when audio data is copied into the output buffer, copying starts from there. Any leftover data remains in is and is used by the next callback.
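For completeness, a minimal sketch of the AQBufferCallback referenced in audio_open (my assumption of how the bridge can look; audio_buf and AQCallbackStruct are the names used there). It lets the AudioQueue pull data through the same ffplay-style callback that SDL used to drive:
static void AQBufferCallback(void *opaque, AudioQueueRef queue, AudioQueueBufferRef buf)
{
    VideoState *is = (VideoState *)opaque;
    int len = is->aqc.mDataFormat.mBytesPerFrame * is->aqc.frameCount;
    /* fill the staging buffer with decoded PCM, then hand it to the queue */
    sdl_audio_callback(is, audio_buf, len);
    memcpy(buf->mAudioData, audio_buf, len);
    buf->mAudioDataByteSize = len;
    AudioQueueEnqueueBuffer(queue, buf, 0, NULL);
}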
Porting some of the UI event handling:
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
UI events are handled on the main thread. The handling above has been removed in this port, so native iOS APIs must be used instead. I tried notifications, but that does not work here: the call is made from C and the notification receiver cannot be found (other workarounds may exist). The simplest option is iOS's asynchronous dispatch, letting the work run on the main thread:
dispatch_async(dispatch_get_main_queue(), ^{
do_exit(is);
});
Video refresh:
A timer keeps refreshing the screen; this is what makes the video play, otherwise only the sound is heard. The timer interval is best set to less than half of the smallest supported frame interval, otherwise spurious dropped frames appear and otherwise smooth video becomes choppy. Triggering the video draw (a GCD timer sketch follows the snippet below):
double remaining_time = 0.0;
remaining_time = 0.01; //REFRESH_RATE;
if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
    video_refresh(is, &remaining_time);
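One way to wire this up is a GCD timer on the main queue; a sketch, assuming is is reachable from the owning view controller (the 10 ms period matches the value above):
dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0,
                                                 dispatch_get_main_queue());
dispatch_source_set_timer(timer, DISPATCH_TIME_NOW,
                          10 * NSEC_PER_MSEC,  /* 10 ms period */
                          1 * NSEC_PER_MSEC);  /* leeway */
dispatch_source_set_event_handler(timer, ^{
    double remaining_time = 0.01;
    if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
        video_refresh(is, &remaining_time);
});
dispatch_resume(timer);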
Getting the playback position:
First get the total duration from is->ic->duration; this value comes from parsing the mov file.
The current position is get_master_clock(is) * AV_TIME_BASE, in microseconds.
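Combining the two gives a progress fraction for a UI slider (a minimal sketch using only the fields named above):
double duration_s = (double)is->ic->duration / AV_TIME_BASE; /* total length, seconds */
double position_s = get_master_clock(is);                    /* current position, seconds */
double progress   = isnan(position_s) ? 0.0 : position_s / duration_s;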
Seeking (drag to play):
First set the seek offset, in seconds:
void PlayEngine::seek(int64_t offset)
{
    double pos;
    pos = get_master_clock(is);
    if (isnan(pos))
        pos = (double)is->seek_pos / AV_TIME_BASE;
    pos += offset;
    if (is->ic->start_time != AV_NOPTS_VALUE && pos < is->ic->start_time / (double)AV_TIME_BASE)
        pos = is->ic->start_time / (double)AV_TIME_BASE;
    stream_seek(is, (int64_t)(pos * AV_TIME_BASE), (int64_t)(offset * AV_TIME_BASE), 0);
}
First get the current master clock (in seconds); adding the seek offset to it gives the target playback position, which is converted to microseconds for stream_seek. The isnan(pos) check essentially detects whether playback has started yet.
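Usage sketch (engine is a hypothetical PlayEngine instance):
engine->seek(10);  /* jump 10 s forward from the current position */
engine->seek(-5);  /* jump 5 s back */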
The ffplay playback framework:
The ffplay code runs three main threads: a read thread for the media file, a video decode thread, and a subtitle decode thread. The read thread is always present; the other two depend on the file.
Read thread: opens and parses the media file, opens the audio device, drives the audio callback, creates the video and subtitle decode threads, and reads the compressed audio and video packets.
Video decode thread: decodes the video and puts the decoded frames into the picture queue.
Subtitle decode thread: decodes subtitles and puts the results into the subtitle queue.
On the audio callback:
Audio device initialization and the playback callback both live with the read thread, partly because audio decoding takes little time, and partly because the audio callback cannot go on the UI thread: dragging the UI could make audio playback stutter. Moreover, this kind of hardware callback is driven by interrupts, so it is not tied to any particular thread. Hosting it in the read thread comfortably meets the performance requirements, since the read thread mostly performs I/O.
Audio playback and video playback were covered above.
Audio/video synchronization:
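The idea, visible in the code below, is that the audio callback stamps the audio clock (set_clock_at in sdl_audio_callback) and video_refresh schedules each picture against the master clock. A condensed sketch of ffplay's compute_target_delay logic from sources of this vintage (the constants are ffplay's; the frame-duplication branch is omitted):
diff = get_clock(&is->vidclk) - get_master_clock(is); /* video ahead (+) or behind (-) */
sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
    if (diff <= -sync_threshold)
        delay = FFMAX(0, delay + diff); /* video is late: shrink the delay */
    else if (diff >= sync_threshold)
        delay = 2 * delay;              /* video is early: stretch the delay */
}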
Code analysis:
Register protocols, decoders, encoders, audio input/output devices and filters:
/* register all codecs, demux and protocols */
avcodec_register_all();
#if CONFIG_AVDEVICE
avdevice_register_all();
#endif
#if CONFIG_AVFILTER
avfilter_register_all();
#endif
av_register_all();
This also registers some hardware accelerators.
//initialize networking
avformat_network_init();
init_opts();
//show_banner(argc, argv, options);
//parse the input arguments
parse_options(NULL, argc, argv, options, opt_input_file);
//input_filename = "test.aac";
if (!input_filename) {
    show_usage();
    av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
    av_log(NULL, AV_LOG_FATAL,
           "Use -h to get full help or, even better, run 'man %s'\n", program_name);
    exit(1);
}
//register the codec lock manager; useful when several threads share the same decoder
if (av_lockmgr_register(lockmgr)) {
    av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
    do_exit(NULL);
}
//initialize the flush packet; putting it into a packet queue asks that queue to flush
av_init_packet(&flush_pkt);
flush_pkt.data = (uint8_t *)&flush_pkt;
//open the media
is = stream_open(input_filename, file_iformat);
if (!is) {
    av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
    do_exit(NULL);
}
Read thread code analysis:
VideoState *is = (VideoState *)arg;
AVFormatContext *ic = NULL; //format context; the header read from the file is parsed and stored here
int err, i, ret; //error and return codes
int st_index[AVMEDIA_TYPE_NB]; //indices of the video, audio and subtitle streams
AVPacket pkt1, *pkt = &pkt1; //compressed packet
int eof = 0; //end-of-file flag
int64_t stream_start_time; //stream start time
int pkt_in_play_range = 0; //whether the packet lies within the requested play range
AVDictionaryEntry *t; //dictionary entry
AVDictionary **opts; //options, user settings
int orig_nb_streams; //number of streams in the original file
SDL_mutex *wait_mutex = SDL_CreateMutex(); //mutex
memset(st_index, -1, sizeof(st_index));
//initialize the video, audio and subtitle stream indices to -1
is->last_video_stream = is->video_stream = -1;
is->last_audio_stream = is->audio_stream = -1;
is->last_subtitle_stream = is->subtitle_stream = -1;
//allocate the format context
ic = avformat_alloc_context();
//set the interrupt callback: it checks the quit flag during blocking operations so they can be abandoned when playback is aborted
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is; //argument passed to the callback
//open the media file and read the header
err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
if (err < 0) {
    //print_error(is->filename, err);
    ret = -1;
    goto fail;
}
//report any format option that was not consumed
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
    av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
    ret = AVERROR_OPTION_NOT_FOUND;
    goto fail;
}
is->ic = ic;
//if generating pts (presentation timestamps) was requested, set the flag on ic
if (genpts)
    ic->flags |= AVFMT_FLAG_GENPTS;
//these options would normally be built from the codec options; passing NULL here still works, the important part is the initialization, otherwise it would crash
opts = NULL; //setup_find_stream_info_opts(ic, codec_opts);
orig_nb_streams = ic->nb_streams; //number of streams found in the file, at most 3 here, i.e. video, audio and subtitles
//probe the streams; this initializes timestamps, frame rate and so on
err = avformat_find_stream_info(ic, opts);
if (err < 0) {
    av_log(NULL, AV_LOG_WARNING,
           "%s: could not find codec parameters\n", is->filename);
    ret = -1;
    goto fail;
}
//for (i = 0; i < orig_nb_streams; i++)
//    av_dict_free(&opts[i]);
av_freep(&opts);
//clear the end-of-file flag
if (ic->pb)
    ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
//seek by bytes only for formats with discontinuous timestamps, ogg excepted
if (seek_by_bytes < 0)
    seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
//maximum frame duration
is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
/* if seeking requested, we execute it */
//if a start time was given (not AV_NOPTS_VALUE), seek to that starting position; this works the same way as seeking during playback
if (start_time != AV_NOPTS_VALUE) {
    int64_t timestamp;
    timestamp = start_time;
    /* add the stream start time */
    if (ic->start_time != AV_NOPTS_VALUE)
        timestamp += ic->start_time;
    ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
               is->filename, (double)timestamp / AV_TIME_BASE);
    }
}
//check whether this is a realtime source, e.g. played over rtp
is->realtime = is_realtime(ic);
//discard everything until the streams are opened
for (i = 0; i < ic->nb_streams; i++)
    ic->streams[i]->discard = AVDISCARD_ALL;
//if (!video_disable)
//pick the most suitable video, audio and subtitle streams and store their indices
st_index[AVMEDIA_TYPE_VIDEO] =
    av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                        wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
//if (!audio_disable)
st_index[AVMEDIA_TYPE_AUDIO] =
    av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                        wanted_stream[AVMEDIA_TYPE_AUDIO],
                        st_index[AVMEDIA_TYPE_VIDEO],
                        NULL, 0);
//if (!video_disable && !subtitle_disable)
st_index[AVMEDIA_TYPE_SUBTITLE] =
    av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                        wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                        (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                         st_index[AVMEDIA_TYPE_AUDIO] :
                         st_index[AVMEDIA_TYPE_VIDEO]),
                        NULL, 0);
if (show_status) {
    av_dump_format(ic, 0, is->filename, 0); //print the format information
}
is->show_mode = show_mode;
/* open the streams */
//if there is an audio stream, open it; this also initializes the audio device and installs the callback
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
    stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
}
//if there is a video stream, open it, which starts the video decode thread
ret = -1;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
    ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
//if the display mode is SHOW_MODE_NONE: show the video when there is one, otherwise draw waveforms and the like
if (is->show_mode == SHOW_MODE_NONE)
    is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
//if there is a subtitle stream, open it, which starts the subtitle decode thread
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
    stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
}
//with neither an audio nor a video stream, go to fail
if (is->video_stream < 0 && is->audio_stream < 0) {
    av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
           is->filename);
    ret = -1;
    goto fail;
}
//enable infinite buffering, mainly for realtime sources
if (infinite_buffer < 0 && is->realtime)
    infinite_buffer = 1;
//thread main loop
for (;;) {
    if (is->abort_request) //playback aborted, leave the loop
        break;
    if (is->paused != is->last_paused) { //pause toggled: pause while playing, resume on the next toggle
        is->last_paused = is->paused;
        if (is->paused)
            is->read_pause_return = av_read_pause(ic);
        else
            av_read_play(ic);
    }
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
    if (is->paused && //pause handling for realtime protocols
        (!strcmp(ic->iformat->name, "rtsp") ||
         (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
        /* wait 10 ms to avoid trying to get another packet */
        /* XXX: horrible */
        SDL_Delay(10);
        continue;
    }
#endif
    //seek request: reposition the file read position, flush the audio, video and subtitle packet queues, and reset the playback clock
    if (is->seek_req) {
        int64_t seek_target = is->seek_pos;
        int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2 : INT64_MIN;
        int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2 : INT64_MAX;
        // FIXME the +-2 is due to rounding being not done in the correct direction in generation
        // of the seek_pos/seek_rel variables
        ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "%s: error while seeking\n", is->ic->filename);
        } else {
            if (is->audio_stream >= 0) {
                packet_queue_flush(&is->audioq);
                packet_queue_put(&is->audioq, &flush_pkt);
            }
            if (is->subtitle_stream >= 0) {
                packet_queue_flush(&is->subtitleq);
                packet_queue_put(&is->subtitleq, &flush_pkt);
            }
            if (is->video_stream >= 0) {
                packet_queue_flush(&is->videoq);
                packet_queue_put(&is->videoq, &flush_pkt);
            }
            if (is->seek_flags & AVSEEK_FLAG_BYTE) {
                set_clock(&is->extclk, NAN, 0);
            } else {
                set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
            }
        }
        is->seek_req = 0; //clear the seek request
        is->queue_attachments_req = 1; //set the attached-picture request flag, asking for the attached picture again
        eof = 0;
        if (is->paused) //if paused while seeking, step to the next frame
            step_to_next_frame(is);
    }
    //attached-picture request
    if (is->queue_attachments_req) {
        if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) { //there is a video stream and it is an attached picture (e.g. cover art)
            AVPacket copy;
            if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
                goto fail;
            packet_queue_put(&is->videoq, &copy);
            packet_queue_put_nullpacket(&is->videoq, is->video_stream);
        }
        is->queue_attachments_req = 0;
    }
    //queues full: block on the read condition variable
    /* if the queue are full, no need to read more */
    if (infinite_buffer < 1 &&
        (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
         || ((is->audioq.nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
             && (is->videoq.nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
                 || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
             && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
        /* wait 10 ms */
        SDL_LockMutex(wait_mutex);
        SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
        SDL_UnlockMutex(wait_mutex);
        continue;
    }
    //when all streams have finished playing: restart from the beginning if looping is requested, or exit if autoexit is set
    if (!is->paused &&
        (!is->audio_st || is->audio_finished == is->audioq.serial) &&
        (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
        if (loop != 1 && (!loop || --loop)) {
            stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
        } else if (autoexit) {
            ret = AVERROR_EOF;
            goto fail;
        }
    }
    //end of file: queue null packets to flush the decoders
    if (eof) {
        if (is->video_stream >= 0)
            packet_queue_put_nullpacket(&is->videoq, is->video_stream);
        if (is->audio_stream >= 0)
            packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
        SDL_Delay(10);
        eof = 0;
        continue;
    }
    //read one packet
    ret = av_read_frame(ic, pkt);
    if (ret < 0) {
        if (ret == AVERROR_EOF || url_feof(ic->pb))
            eof = 1; //reached the end
        if (ic->pb && ic->pb->error) //read error
            break;
        SDL_LockMutex(wait_mutex);
        SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
        SDL_UnlockMutex(wait_mutex);
        continue;
    }
    /* check if packet is in play range specified by user, then queue, otherwise discard */
    stream_start_time = ic->streams[pkt->stream_index]->start_time;
    pkt_in_play_range = duration == AV_NOPTS_VALUE ||
            (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
            av_q2d(ic->streams[pkt->stream_index]->time_base) -
            (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
            <= ((double)duration / 1000000);
    if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
        packet_queue_put(&is->audioq, pkt);
    } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
               && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
        packet_queue_put(&is->videoq, pkt);
    } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
        packet_queue_put(&is->subtitleq, pkt);
    } else {
        av_free_packet(pkt);
    }
}
/* wait until the end */
while (!is->abort_request) {
    SDL_Delay(100);
}
ret = 0;
//failure and cleanup path
fail:
/* close each stream */
if (is->audio_stream >= 0)
    stream_component_close(is, is->audio_stream);
if (is->video_stream >= 0)
    stream_component_close(is, is->video_stream);
if (is->subtitle_stream >= 0)
    stream_component_close(is, is->subtitle_stream);
if (is->ic) {
    avformat_close_input(&is->ic);
}
if (ret != 0) {
    // SDL_Event event;
    //
    // event.type = FF_QUIT_EVENT;
    // event.user.data1 = is;
    // SDL_PushEvent(&event);
    dispatch_async(dispatch_get_main_queue(), ^{
        do_exit(is); //tell the main thread to quit
    });
}
SDL_DestroyMutex(wait_mutex);
Video thread code analysis:
static int video_thread(void *arg)
{
    AVPacket pkt = { 0 }; //compressed packet
    VideoState *is = (VideoState *)arg; //thread argument
    AVFrame *frame = av_frame_alloc(); //decoded video frame
    double pts; //presentation timestamp
    int ret;
    int serial = 0;
#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL; //input and output filters
    int last_w = 0; //width of the previous frame
    int last_h = 0; //height of the previous frame
    enum AVPixelFormat last_format = AVPixelFormat(-2); //pixel format of the previous frame
    int last_serial = -1;
#endif
    //thread main loop
    for (;;) {
        while (is->paused && !is->videoq.abort_request) //paused and not aborted
            SDL_Delay(10); //just sleep, do nothing
        //reset the frame to its defaults
        avcodec_get_frame_defaults(frame);
        av_free_packet(&pkt);
        //get one decoded picture
        ret = get_video_frame(is, frame, &pkt, &serial);
        if (ret < 0) //error
            goto the_end;
        if (!ret)
            continue; //no frame produced, next iteration
#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != serial) {
            //the format or size differs from the previous frame, so the filters must be reconfigured,
            //notably when the picture quality switches, e.g. SP frames over rtp
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(AVPixelFormat(frame->format)), "none"), serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            //create and configure the input and output filters
            if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
                // SDL_Event event;
                // event.type = FF_QUIT_EVENT;
                // event.user.data1 = is;
                // SDL_PushEvent(&event);
                dispatch_async(dispatch_get_main_queue(), ^{
                    do_exit(is);
                });
                av_free_packet(&pkt);
                goto the_end;
            }
            filt_in = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = AVPixelFormat(frame->format);
            last_serial = serial;
        }
        //feed the frame into the filter graph
        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;
        av_frame_unref(frame);
        avcodec_get_frame_defaults(frame);
        av_free_packet(&pkt);
        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime() / 1000000.0;
            //pull filtered frames out of the graph
            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->video_finished = serial;
                ret = 0;
                break;
            }
            //filter delay: the current time minus the time the previous frame was returned
            is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            //compute the pts
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
            ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial); //enqueue into the picture queue
            av_frame_unref(frame); //drop our reference to the frame
        }
#else
        //without a filter graph; scaling is done in queue_picture
        pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
        ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
        av_frame_unref(frame);
#endif
        if (ret < 0)
            goto the_end;
    }
    //thread exit
the_end:
    avcodec_flush_buffers(is->video_st->codec);
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free_packet(&pkt);
    av_frame_free(&frame);
    return 0;
}
Source analysis of get_video_frame:
static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
{
    int got_picture;
    //take one compressed packet from the video queue
    if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
        return -1;
    //if this is the flush packet, flush the decoder buffers
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);
        SDL_LockMutex(is->pictq_mutex);
        // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
        //while pictures remain in the queue and the user has not aborted, wait on the picture condition variable
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_duration = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_dropped_pts = AV_NOPTS_VALUE;
        SDL_UnlockMutex(is->pictq_mutex);
        return 0;
    }
    //decode the packet
    if (avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
        return 0; //decode failure
    if (!got_picture && !pkt->data)
        is->video_finished = *serial;
    //a picture was produced
    if (got_picture) {
        int ret = 1;
        double dpts = NAN;
        if (decoder_reorder_pts == -1) {
            frame->pts = av_frame_get_best_effort_timestamp(frame);
        } else if (decoder_reorder_pts) {
            frame->pts = frame->pkt_pts;
        } else {
            frame->pts = frame->pkt_dts;
        }
        if (frame->pts != AV_NOPTS_VALUE)
            dpts = av_q2d(is->video_st->time_base) * frame->pts;
        //guess the sample aspect ratio
        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
        //early frame dropping against the master clock
        if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
            SDL_LockMutex(is->pictq_mutex);
            if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
                double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
                double ptsdiff = dpts - is->frame_last_pts;
                if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
                    !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
                    clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
                    is->videoq.nb_packets) {
                    is->frame_last_dropped_pos = av_frame_get_pkt_pos(frame);
                    is->frame_last_dropped_pts = dpts;
                    is->frame_last_dropped_serial = *serial;
                    is->frame_drops_early++;
                    av_frame_unref(frame);
                    ret = 0;
                }
            }
            SDL_UnlockMutex(is->pictq_mutex);
        }
        return ret;
    }
    return 0;
}
Analysis of the queue_picture function
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
{
    //picture slot in the queue
    VideoPicture *vp;
#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c pts=%0.3f\n",
           av_get_picture_type_char(src_frame->pict_type), pts);
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    /* keep the last already displayed picture in the queue */
    //the queue is full and the user has not aborted
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    //the user aborted: return -1
    if (is->videoq.abort_request)
        return -1;
    vp = &is->pictq[is->pictq_windex];
    vp->sar = src_frame->sample_aspect_ratio;
    /* alloc or resize hardware picture buffer */
    if (!vp->bmp || vp->reallocate || !vp->allocated ||
        vp->width != src_frame->width ||
        vp->height != src_frame->height) {
        //SDL_Event event;
        vp->allocated = 0;
        vp->reallocate = 0;
        vp->width = src_frame->width;
        vp->height = src_frame->height;
        /* the allocation must be done in the main thread to avoid
           locking problems. */
        // event.type = FF_ALLOC_EVENT;
        // event.user.data1 = is;
        // SDL_PushEvent(&event);
        dispatch_async(dispatch_get_main_queue(), ^{
            alloc_picture(is); //allocate the picture on the main thread
        });
        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
        if (is->videoq.abort_request/* && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1*/) {
            while (!vp->allocated) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
        }
        SDL_UnlockMutex(is->pictq_mutex);
        if (is->videoq.abort_request)
            return -1;
    }
    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict = { { 0 } };
        /* get a pointer on the bitmap */
        //SDL_LockYUVOverlay (vp->bmp);
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];
        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
#if CONFIG_AVFILTER
        // FIXME use direct rendering
        av_picture_copy(&pict, (AVPicture *)src_frame,
                        AVPixelFormat(src_frame->format), vp->width, vp->height);
#else
        av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
        //without a filter graph, scale/convert here
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, src_frame->format, vp->width, vp->height,
            AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* workaround SDL PITCH_WORKAROUND */
        duplicate_right_border_pixels(vp->bmp); //duplicate the right-border pixels
        /* update the bitmap content */
        //SDL_UnlockYUVOverlay(vp->bmp);
        vp->pts = pts;
        vp->pos = pos;
        vp->serial = serial;
        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(VideoState *is)
{
    VideoPicture *vp;
    int64_t bufferdiff;
    vp = &is->pictq[is->pictq_windex];
    //free any existing picture first
    free_picture(vp);
    // video_open(is, 0, vp);
    //create the picture in SDL; this mainly performs the initialization for the YUV-to-bitmap conversion, for performance reasons
    vp->bmp = SDL_CreateYUV_SW(vp->width, vp->height, SDL_YV12_OVERLAY, NULL);
    // SDL_CreateYUVOverlay(vp->width, vp->height,
    //                      SDL_YV12_OVERLAY,
    //                      screen);
    bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        av_log(NULL, AV_LOG_FATAL,
               "Error: the video system does not support an image\n"
               "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
               "to reduce the image size.\n", vp->width, vp->height);
        do_exit(is);
    }
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
Subtitle thread function analysis:
static int subtitle_thread(void *arg)
{
    VideoState *is = (VideoState *)arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    int serial;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;
    for (;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        //take one packet
        if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
            break;
        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);
        if (is->subtitleq.abort_request)
            return 0;
        sp = &is->subpq[is->subpq_windex];
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        //compute the timestamp
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
        //decode the subtitle
        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);
        if (got_subtitle && sp->sub.format == 0) {
            if (sp->sub.pts != AV_NOPTS_VALUE)
                pts = sp->sub.pts / (double)AV_TIME_BASE;
            sp->pts = pts;
            sp->serial = serial;
            //convert the palette from RGBA to YUVA, then add to the subtitle queue
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }
            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        } else if (got_subtitle) {
            avsubtitle_free(&sp->sub);
        }
        av_free_packet(pkt);
    }
    return 0;
}
Video refresh function analysis
/* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
    VideoState *is = (VideoState *)opaque; //playback state
    VideoPicture *vp; //video picture
    double time;
    SubPicture *sp, *sp2; //subtitles
    //when not paused, syncing to the external clock, and playing a realtime source, check the external clock speed
    if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
        check_external_clock_speed(is);
    //audio playing with no video shown, and display not disabled: draw the visualization
    if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
        time = av_gettime() / 1000000.0;
        if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
            //forced refresh, or time to redraw
            video_display(is);
            is->last_vis_time = time;
        }
        //remaining time: the smaller of the current remaining time and the time until the next visualization update
        *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
    }
    //if there is video
    if (is->video_st) {
        int redisplay = 0;
        if (is->force_refresh)
            redisplay = pictq_prev_picture(is);
retry:
        //the picture queue is empty
        if (is->pictq_size == 0) {
            SDL_LockMutex(is->pictq_mutex);
            //if the pts of the last dropped frame is valid and newer than the last frame's pts, update the video pts
            if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
                update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
                is->frame_last_dropped_pts = AV_NOPTS_VALUE;
            }
            SDL_UnlockMutex(is->pictq_mutex);
            // nothing to do, no picture to display in the queue
        } else { //the queue holds pictures
            //duration of the previous frame, current duration, delay
            double last_duration, duration, delay;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];
            if (vp->serial != is->videoq.serial) { //stale serial, e.g. from before a seek: skip it
                pictq_next_picture(is);
                redisplay = 0;
                goto retry; //try again
            }
            if (is->paused) //paused
                goto display;
            /* compute nominal last_duration */
            last_duration = vp->pts - is->frame_last_pts;
            if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
                /* if duration of the last frame was sane, update last_duration in video state */
                is->frame_last_duration = last_duration;
            }
            if (redisplay) //redisplaying the same frame
                delay = 0.0;
            else
                delay = compute_target_delay(is->frame_last_duration, is);
            time = av_gettime() / 1000000.0;
            if (time < is->frame_timer + delay && !redisplay) {
                //it is not yet time to show this frame:
                //compute the remaining time and return
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                return;
            }
            is->frame_timer += delay;
            if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
                is->frame_timer = time;
            SDL_LockMutex(is->pictq_mutex);
            if (!redisplay && !isnan(vp->pts))
                update_video_pts(is, vp->pts, vp->pos, vp->serial);
            SDL_UnlockMutex(is->pictq_mutex);
            if (is->pictq_size > 1) { //late frame dropping
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                duration = nextvp->pts - vp->pts;
                if (!is->step && (redisplay || framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) {
                    if (!redisplay)
                        is->frame_drops_late++;
                    pictq_next_picture(is);
                    redisplay = 0;
                    goto retry;
                }
            }
            //subtitle display
            if (is->subtitle_st) {
                while (is->subpq_size > 0) {
                    sp = &is->subpq[is->subpq_rindex];
                    if (is->subpq_size > 1)
                        sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                    else
                        sp2 = NULL;
                    if (sp->serial != is->subtitleq.serial
                        || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                        || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                    {
                        free_subpicture(sp);
                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;
                        SDL_LockMutex(is->subpq_mutex);
                        is->subpq_size--;
                        SDL_CondSignal(is->subpq_cond);
                        SDL_UnlockMutex(is->subpq_mutex);
                    } else {
                        break;
                    }
                }
            }
display:
            /* display picture */
            if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
                video_display(is);
            pictq_next_picture(is);
            if (is->step && !is->paused)
                stream_toggle_pause(is);
        }
    }
    is->force_refresh = 0;
}
//draw the picture and the subtitles
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    SDL_Rect rect;
    int i;
    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
                sp = &is->subpq[is->subpq_rindex];
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    //SDL_LockYUVOverlay (vp->bmp);
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];
                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];
                    //blend the subtitles into the picture
                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);
                    //SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }
        //compute the display rect; this can be dropped in the iOS port
        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
        //SDL_DisplayYUVOverlay(vp->bmp, &rect);
        SDL_DelayDisplay(is, vp); //show the picture
        //update the display rect
        if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
            // int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
            // fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
            is->last_display_rect = rect;
        }
    }
}
//called while playing audio with no video display: draws the waveform or spectrum
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;
    for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
        ;
    nb_freq = 1 << (rdft_bits - 1);
    /* compute display index : center on currently output samples */
    channels = s->audio_tgt.channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used = s->show_mode == SHOW_MODE_WAVES ? s->width : (2 * nb_freq);
        n = 2 * channels;
        delay = s->audio_write_buf_size;
        delay /= n;
        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_tgt.freq) / 1000000;
        }
        delay += 2 * data_used;
        if (delay < data_used)
            delay = data_used;
        i_start = x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if (s->show_mode == SHOW_MODE_WAVES) { //waveform display
            h = INT_MIN;
            for (i = 0; i < 1000; i += channels) {
                int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a = s->sample_array[idx];
                int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
                int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
                int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
                int score = a - d;
                if (h < score && (b ^ c) < 0) {
                    h = score;
                    i_start = idx;
                }
            }
        }
        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }
    // bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if (s->show_mode == SHOW_MODE_WAVES) {
        // fill_rectangle(screen,
        //                s->xleft, s->ytop, s->width, s->height,
        //                bgcolor, 0);
        //
        // fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for (ch = 0; ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for (x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                // fill_rectangle(screen,
                //                s->xleft + x, ys, 1, y,
                //                fgcolor, 0);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }
        // fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
        for (ch = 1; ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            // fill_rectangle(screen,
            //                s->xleft, y, s->width, 1,
            //                fgcolor, 0);
        }
        // SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    } else { //spectrum display
        nb_display_channels = FFMIN(nb_display_channels, 2);
        if (rdft_bits != s->rdft_bits) {
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits = rdft_bits;
            s->rdft_data = (FFTSample *)av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for (ch = 0; ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2 * nb_freq * ch;
                i = i_start + ch;
                for (x = 0; x < 2 * nb_freq; x++) {
                    double w = (x - nb_freq) * (1.0 / nb_freq);
                    data[ch][x] = s->sample_array[i] * (1.0 - w * w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            /* Least efficient way to do this, we should of course
             * directly access it but it is more than fast enough. */
            for (y = 0; y < s->height; y++) {
                double w = 1 / sqrt(nb_freq);
                int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
                int b = (nb_display_channels == 2) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
                                                                   + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
                a = FFMIN(a, 255);
                b = FFMIN(b, 255);
                // fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
                //
                // fill_rectangle(screen,
                //                s->xpos, s->height - y, 1, 1,
                //                fgcolor, 0);
            }
        }
        // SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        if (!s->paused)
            s->xpos++;
        if (s->xpos >= s->width)
            s->xpos = s->xleft;
    }
}