Video Decoding
In the data-reading thread, the video decoding thread is created with:
SDL_CreateThreadEx(&is->_video_tid, video_thread, ffp, "ff_video_dec");
After it is created, the read thread runs an infinite loop:
for (;;) {
    if (is->abort_request)
        break;
    // ignore the audio part
    ret = av_read_frame(ic, pkt);
    packet_queue_put(&is->videoq, pkt); // push the demuxed video packet into the videoq queue
}
This loop puts the demuxed packets into is->videoq; when we decode later, the decoder will read packets from this queue and then decode them.
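For reference, videoq is a mutex-protected producer/consumer queue: the read thread is the producer and the decoder is the consumer. A minimal conceptual sketch of the put side, with simplified names (the real ffplay/ijkplayer PacketQueue additionally tracks serial numbers, total byte size and duration), might look like this:

typedef struct MyPacketNode {
    AVPacket pkt;
    struct MyPacketNode *next;
} MyPacketNode;

typedef struct MyPacketQueue {
    MyPacketNode *first, *last;
    int nb_packets;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond  *cond;
} MyPacketQueue;

static int my_packet_queue_put(MyPacketQueue *q, AVPacket *pkt)
{
    MyPacketNode *node = av_mallocz(sizeof(MyPacketNode));
    if (!node)
        return -1;
    av_packet_move_ref(&node->pkt, pkt);    // take ownership of the packet's data

    SDL_LockMutex(q->mutex);
    if (!q->last)
        q->first = node;
    else
        q->last->next = node;
    q->last = node;
    q->nb_packets++;
    SDL_CondSignal(q->cond);                // wake a decoder blocked waiting for data
    SDL_UnlockMutex(q->mutex);
    return 0;
}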
Now let's look at the decoding thread:
ffp->node_vdec->func_run_sync(node);
Once again this runs a function stored in the FFPlayer struct. Just as with audio playback, this function pointer must have been assigned during initialization, so let's go back and see exactly where it was set:
Again, during initialization ijkmp_android_create() calls ffpipeline_create_from_android(), which contains this line:
pipeline->func_open_video_decoder = func_open_video_decoder;
Then, going back to stream_component_open(), we find these lines:
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
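The first line, decoder_init(), is what ties the video decoder to the videoq that the read loop above keeps filling; in ffplay it is roughly the following (a sketch, exact fields may differ between versions):

static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue,
                         SDL_cond *empty_queue_cond)
{
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;
    d->queue = queue;                       // for the video decoder this is &is->videoq
    d->empty_queue_cond = empty_queue_cond; // used to ask the read thread for more packets
    d->start_pts = AV_NOPTS_VALUE;
}

The second line is the one we care about here: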
IJKFF_Pipenode* ffpipeline_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    return pipeline->func_open_video_decoder(pipeline, ffp);
}
So ffp->node_vdec actually ends up holding the node returned by func_open_video_decoder():
func_open_video_decoder()
| (calls)
// (there is actually a branch here choosing hardware vs. software decoding; we only analyze the hardware path)
// node = ffpipenode_create_video_decoder_from_ffplay(ffp);
node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);
return node;
ffpipenode_create_video_decoder_from_android_mediacodec()
// allocate an IJKFF_Pipenode
IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
node->func_destroy = func_destroy;
node->func_run_sync = func_run_sync;
node->func_flush = func_flush;
return node;
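An IJKFF_Pipenode is little more than a bundle of function pointers plus an opaque payload; its definition is roughly as follows (a sketch, exact members may differ):

typedef struct IJKFF_Pipenode IJKFF_Pipenode;
struct IJKFF_Pipenode {
    SDL_mutex *mutex;
    void      *opaque;                               // per-decoder private data (IJKFF_Pipenode_Opaque)

    void (*func_destroy) (IJKFF_Pipenode *node);
    int  (*func_run_sync)(IJKFF_Pipenode *node);     // the decode loop entry point
    int  (*func_flush)   (IJKFF_Pipenode *node);
};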
In the end ffp->node_vdec = node, so the earlier ffp->node_vdec->func_run_sync(node) is in fact a call to func_run_sync():
func_run_sync()
static int func_run_sync(IJKFF_Pipenode *node)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    FFPlayer    *ffp = opaque->ffp;
    VideoState  *is  = ffp->is;
    Decoder     *d   = &is->viddec;
    PacketQueue *q   = d->queue;
    //...
    opaque->enqueue_thread = SDL_CreateThreadEx(&opaque->_enqueue_thread, enqueue_thread_func, node, "amediacodec_input_thread");
    //...
    while (!q->abort_request) {
        //...
        ret = drain_output_buffer(env, node, timeUs, &dequeue_count, frame, &got_frame);
        //...
        ret = ffp_queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
        //...
    }
fail:
    //...
}
Notice that the video decoding flow creates yet another thread here:
opaque->enqueue_thread = SDL_CreateThreadEx(&opaque->_enqueue_thread, enqueue_thread_func, node, "amediacodec_input_thread");
enqueue_thread_func()
static int enqueue_thread_func(void *arg)
{
    IJKFF_Pipenode *node = arg;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    FFPlayer    *ffp = opaque->ffp;
    VideoState  *is  = ffp->is;
    Decoder     *d   = &is->viddec;
    PacketQueue *q   = d->queue;
    //...
    while (!q->abort_request) {
        ret = feed_input_buffer(env, node, AMC_INPUT_TIMEOUT_US, &dequeue_count);
        if (ret != 0) {
            goto fail;
        }
    }
    //...
}
feed_input_buffer()
static int feed_input_buffer(JNIEnv *env, IJKFF_Pipenode *node, int64_t timeUs, int *enqueue_count)
{
    //...
    if (ffp_packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0) { // take a packet from the videoq
        //...
    }
    //...
    input_buffer_ptr = SDL_AMediaCodec_getInputBuffer(opaque->acodec, input_buffer_index, &input_buffer_size); // get the address of the hardware decoder's input buffer
    //...
    memcpy(input_buffer_ptr, d->pkt_temp.data, copy_size);
    //...
    amc_ret = SDL_AMediaCodec_queueInputBuffer(opaque->acodec, input_buffer_index, 0, copy_size, time_stamp, 0); // submit the buffer to the AMediaCodec decoder
    //...
}
This step is where the hardware decoder is used: the enqueue thread does nothing more than feed each packet of data into the decoder.
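Stripped of error handling, the input side follows the usual MediaCodec pattern of dequeue, copy, queue. A hedged sketch using ijkplayer's SDL_AMediaCodec wrapper (variable names are illustrative; pkt is the AVPacket taken from videoq) would look roughly like this:

ssize_t  input_buffer_index;
size_t   input_buffer_size = 0;
uint8_t *input_buffer_ptr  = NULL;
int64_t  time_stamp = pkt.pts;   // the real code converts this to microseconds

// 1. ask the codec for a free input slot (may time out and return < 0)
input_buffer_index = SDL_AMediaCodec_dequeueInputBuffer(opaque->acodec, timeUs);
if (input_buffer_index >= 0) {
    // 2. map that slot into our address space
    input_buffer_ptr = SDL_AMediaCodec_getInputBuffer(opaque->acodec, input_buffer_index, &input_buffer_size);
    // 3. copy the compressed packet data in
    size_t copy_size = FFMIN((size_t)pkt.size, input_buffer_size);
    memcpy(input_buffer_ptr, pkt.data, copy_size);
    // 4. hand the filled buffer back to the hardware decoder
    SDL_AMediaCodec_queueInputBuffer(opaque->acodec, input_buffer_index, 0, copy_size, time_stamp, 0);
}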
So the decoder's input is fed from this newly created thread. What, then, does the decode thread itself do? Right after that, func_run_sync() calls drain_output_buffer():
drain_output_buffer()
| (calls)
drain_output_buffer_l(env, node, timeUs, dequeue_count, frame, got_frame);
drain_output_buffer_l()
static int drain_output_buffer_l(JNIEnv *env, IJKFF_Pipenode *node, int64_t timeUs, int *dequeue_count, AVFrame *frame, int *got_frame)
{
    //...
    output_buffer_index = SDL_AMediaCodec_dequeueOutputBuffer(opaque->acodec, &bufferInfo, timeUs);
    //...
}
SDL_AMediaCodec_dequeueOutputBuffer: fetches the decoded data from the hardware decoder.
Then, back in func_run_sync(), ffp_queue_picture() is called to insert the decoded frame into the display queue ijkplayer->ffplayer->is->pictq.
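ffp_queue_picture() eventually goes through the ffplay-style queue_picture() path. Conceptually, the push side of the FrameQueue looks roughly like this (a simplified sketch; the ijkplayer version additionally fills an SDL_VoutOverlay, vp->bmp, for rendering):

static int my_queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration)
{
    Frame *vp = frame_queue_peek_writable(&is->pictq); // blocks until a writable slot is free
    if (!vp)
        return -1;                                     // queue was aborted

    vp->pts      = pts;
    vp->duration = duration;
    av_frame_move_ref(vp->frame, src_frame);           // hand the decoded frame over to the queue slot

    frame_queue_push(&is->pictq);                      // advance the write index, wake the display thread
    return 0;
}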
Display Thread
The decoded frames are placed in is->pictq. The display thread simply reads frames from is->pictq and pushes them to the hardware device to complete the rendering.
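The driver of all this is video_refresh_thread(), which is essentially a fixed-rate refresh loop; quoted roughly from the ijkplayer/ffplay source (treat it as a sketch):

static int video_refresh_thread(void *arg)
{
    FFPlayer   *ffp = arg;
    VideoState *is  = ffp->is;
    double remaining_time = 0.0;

    while (!is->abort_request) {
        if (remaining_time > 0.0)
            av_usleep((int)(int64_t)(remaining_time * 1000000.0)); // sleep until the next refresh is due
        remaining_time = REFRESH_RATE;
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(ffp, &remaining_time);                   // pick a frame from pictq and display it
    }
    return 0;
}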
video_refresh_thread()
video_refresh_thread(void *arg)
| (calls)
video_refresh(ffp, &remaining_time);
| (calls)
video_display2(ffp);
| (calls)
video_image_display2(ffp);
video_image_display2()
static void video_image_display2(FFPlayer *ffp)
{
VideoState *is = ffp->is;
Frame *vp;
vp = frame_queue_peek(&is->pictq);
if (vp->bmp) {
SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
ffp->stat.vfps = SDL_SpeedSamplerAdd(&ffp->vfps_sampler, FFP_SHOW_VFPS_FFPLAY, "vfps[ffplay]");
if (!ffp->first_video_frame_rendered) {
ffp->first_video_frame_rendered = 1;
ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
}
}
}
static Frame *frame_queue_peek(FrameQueue *f)
{
return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
}
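Note that frame_queue_peek() only peeks at the frame; the read index is advanced elsewhere in video_refresh() by frame_queue_next(), which in ffplay looks roughly like this:

static void frame_queue_next(FrameQueue *f)
{
    // with keep_last enabled, the first call only marks the current frame as shown
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]); // release the frame that was just displayed
    if (++f->rindex == f->max_size)
        f->rindex = 0;
    SDL_LockMutex(f->mutex);
    f->size--;
    SDL_CondSignal(f->cond);                      // wake the decoder waiting for a free slot
    SDL_UnlockMutex(f->mutex);
}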
It peeks a frame from the queue and then calls SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp) to complete the rendering.
int SDL_VoutDisplayYUVOverlay(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
    if (vout && overlay && vout->display_overlay)
        return vout->display_overlay(vout, overlay);
    return -1;
}
Back during initialization, SDL_VoutAndroid_CreateForAndroidSurface(), which is called from ijkmp_android_create(), contains this line:
vout->display_overlay = func_display_overlay;
So rendering actually goes through func_display_overlay(): it takes the video frame data obtained from the pictq queue and writes it into the native window's video buffer, or draws the image with OpenGL.
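For the native-window path, writing a frame into the screen buffer follows the standard NDK lock/copy/unlockAndPost pattern. A simplified sketch (this is not ijkplayer's actual func_display_overlay, which also handles pixel-format conversion and MediaCodec direct rendering) for an RGBA frame:

#include <android/native_window.h>
#include <string.h>

// window: the ANativeWindow obtained from the Java Surface
// pixels: RGBA pixel data of one decoded frame, `height` rows of `width` pixels
static int render_frame_to_window(ANativeWindow *window, const uint8_t *pixels,
                                  int width, int height)
{
    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(window, &buffer, NULL) != 0)    // lock the next screen buffer
        return -1;

    // copy row by row, because the window stride may be larger than the frame width
    const int src_stride = width * 4;
    const int dst_stride = buffer.stride * 4;
    uint8_t *dst = buffer.bits;
    for (int y = 0; y < height && y < buffer.height; y++)
        memcpy(dst + y * dst_stride, pixels + y * src_stride, src_stride);

    ANativeWindow_unlockAndPost(window);                   // hand the buffer back for display
    return 0;
}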
The msg_loop Thread
This is the message loop thread.
inline static void post_event(JNIEnv *env, jobject weak_this, int what, int arg1, int arg2)
{
(*env)->CallStaticVoidMethod(env, g_clazz.clazz, g_clazz.jmid_postEventFromNative, weak_this, what, arg1, arg2, NULL );
}
This function calls directly into the Java layer to post a status message. On the Java side there is a Handler (created during initialization); the message is posted to its Looper, and the Handler processes it and reacts accordingly. For example, the listener we saw earlier:
videoView.setOnPreparedListener(new IMediaPlayer.OnPreparedListener() {
@Override
public void onPrepared(IMediaPlayer mp) {
videoView.start();
}
});
This callback is fired after the Handler has processed the corresponding message.
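To close the loop: messages such as FFP_MSG_VIDEO_RENDERING_START posted by ffp_notify_msg1() are drained by the msg_loop thread and forwarded to Java through post_event(). Conceptually it does something like the following (a sketch, not the exact ijkplayer_jni.c code; the MEDIA_* constants stand for the Java-side event codes):

static int message_loop_sketch(JNIEnv *env, IjkMediaPlayer *mp, jobject weak_thiz)
{
    while (1) {
        AVMessage msg;
        int ret = ijkmp_get_msg(mp, &msg, 1);   // block until native code posts a message
        if (ret < 0)
            break;

        switch (msg.what) {
        case FFP_MSG_PREPARED:
            post_event(env, weak_thiz, MEDIA_PREPARED, 0, 0);   // ends up in onPrepared() on the Java side
            break;
        case FFP_MSG_VIDEO_RENDERING_START:
            post_event(env, weak_thiz, MEDIA_INFO, MEDIA_INFO_VIDEO_RENDERING_START, 0);
            break;
        default:
            break;
        }
    }
    return 0;
}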