ijkplayer iOS Audio/Video Decoding Explained



ijkplayer starts one decoding thread per stream:

```c
// create the audio decoding thread
if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)

// create the video decoding thread
if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
```

Whether it is audio or video, the basic flow is the same: take a packet from the pre-decode buffer, decode it, and push the result into the corresponding post-decode buffer:

    receive data --> undecoded data --> decoder --> decoded data --> render (play)

videoq/audioq hold the undecoded video/audio packets; pictq/sampq hold the decoded video/audio frames. The queues are initialized together:

```c
/* start video display */
if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
    goto fail;
if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
    goto fail;
if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
    goto fail;
```

In frame_queue_init, `FrameQueue *f` is the decoded-frame queue (pictq/sampq) and `PacketQueue *pktq` is the matching undecoded-packet queue (videoq/audioq):

```c
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
```

Note that both the undecoded and the decoded queue types live inside the VideoState struct:

```c
typedef struct VideoState {
    ......
    /* decoded-frame queues */
    FrameQueue pictq;
    FrameQueue subpq;
    FrameQueue sampq;
    ......
    /* decoders */
    Decoder auddec;
    Decoder viddec;
    Decoder subdec;
    ......
    int audio_stream;
    ......
    /* undecoded-packet queues */
    PacketQueue audioq;
    PacketQueue subtitleq;
    PacketQueue videoq;
    ......
    PacketQueue *buffer_indicator_queue;
    ......
} VideoState;
```
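Conceptually, each PacketQueue/FrameQueue pair is a bounded producer/consumer buffer: the decode thread drains packets and pushes frames, the render thread drains frames, and each side blocks when its queue is full or empty. Below is a minimal sketch of that blocking-queue idea using pthreads. It is illustrative only, not ijkplayer's actual FrameQueue, which additionally tracks serials and can keep the last displayed frame:

```c
#include <pthread.h>

#define QUEUE_MAX 16

typedef struct {
    void           *items[QUEUE_MAX];  /* frames; opaque payload for the sketch */
    int             size, windex, rindex;
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
} BoundedQueue;

static void bq_init(BoundedQueue *q) {
    q->size = q->windex = q->rindex = 0;
    pthread_mutex_init(&q->mutex, NULL);
    pthread_cond_init(&q->cond, NULL);
}

/* producer (decode thread): blocks while the queue is full */
static void bq_push(BoundedQueue *q, void *frame) {
    pthread_mutex_lock(&q->mutex);
    while (q->size == QUEUE_MAX)
        pthread_cond_wait(&q->cond, &q->mutex);
    q->items[q->windex] = frame;
    q->windex = (q->windex + 1) % QUEUE_MAX;
    q->size++;
    pthread_cond_signal(&q->cond);   /* wake a waiting consumer */
    pthread_mutex_unlock(&q->mutex);
}

/* consumer (render thread): blocks while the queue is empty */
static void *bq_pop(BoundedQueue *q) {
    pthread_mutex_lock(&q->mutex);
    while (q->size == 0)
        pthread_cond_wait(&q->cond, &q->mutex);
    void *frame = q->items[q->rindex];
    q->rindex = (q->rindex + 1) % QUEUE_MAX;
    q->size--;
    pthread_cond_signal(&q->cond);   /* wake a waiting producer */
    pthread_mutex_unlock(&q->mutex);
    return frame;
}
```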
When the video decoder is opened, func_open_video_decoder decides between the hardware (VideoToolbox) and software (FFmpeg) paths:

```c
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipenode *node = NULL;
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;

    if (ffp->videotoolbox) {
        // hardware decoding was requested: try VideoToolbox first
        node = ffpipenode_create_video_decoder_from_ios_videotoolbox(ffp);
        if (!node)
            ALOGE("vtb fail!!! switch to ffmpeg decode!!!! \n");
    }
    if (node == NULL) {
        // hardware decoding disabled or failed: fall back to FFmpeg software decoding
        node = ffpipenode_create_video_decoder_from_ffplay(ffp);
        ffp->stat.vdec_type = FFP_PROPV_DECODER_AVCODEC;
        opaque->is_videotoolbox_open = false;
    } else {
        ffp->stat.vdec_type = FFP_PROPV_DECODER_VIDEOTOOLBOX;
        opaque->is_videotoolbox_open = true;
    }
    ffp_notify_msg2(ffp, FFP_MSG_VIDEO_DECODER_OPEN, opaque->is_videotoolbox_open);
    return node;
}
```

ffpipenode_create_video_decoder_from_ffplay creates the software video decoder; ffpipenode_create_video_decoder_from_ios_videotoolbox creates the hardware one.

The software constructor:

```c
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ffplay(FFPlayer *ffp)
{
    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    opaque->ffp         = ffp;

    node->func_destroy  = func_destroy;
    node->func_run_sync = func_run_sync;

    ffp_set_video_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(ffp->is->viddec.avctx->codec_id));
    ffp->stat.vdec_type = FFP_PROPV_DECODER_AVCODEC;
    return node;
}
```

The hardware constructor (VideoToolbox requires iOS 8+, and this code path only accepts H.264):

```objc
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ios_videotoolbox(FFPlayer *ffp)
{
    if (!ffp || !ffp->is)
        return NULL;
    if ([[[UIDevice currentDevice] systemVersion] floatValue] < 8.0) {
        return NULL;
    }

    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;
    memset(node, sizeof(IJKFF_Pipenode), 0);  // NB: the arguments look swapped; the conventional call is memset(node, 0, sizeof(IJKFF_Pipenode))

    VideoState            *is     = ffp->is;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    node->func_destroy  = func_destroy;
    node->func_run_sync = func_run_sync;
    opaque->ffp         = ffp;
    opaque->decoder     = &is->viddec;
    opaque->avctx       = opaque->decoder->avctx;

    switch (opaque->avctx->codec_id) {
    case AV_CODEC_ID_H264:
        if (ffp->vtb_async)
            opaque->context = Ijk_VideoToolbox_Async_Create(ffp, opaque->avctx);
        else
            opaque->context = Ijk_VideoToolbox_Sync_Create(ffp, opaque->avctx);
        break;
    default:
        ALOGI("Videotoolbox-pipeline:open_video_decoder: not H264\n");
        goto fail;
    }
    if (opaque->context == NULL) {
        ALOGE("could not init video tool box decoder !!!");
        goto fail;
    }
    return node;

fail:
    ffpipenode_free_p(&node);
    return NULL;
}
```

AVPacket is a struct that carries the compressed bitstream (H.264 here); AVFrame carries the decoded YUV data:

    AVPacket --> decode --> AVFrame

The video decoding thread, video_thread, simply dispatches into the pipenode:

```c
static int video_thread(void *arg)
{
    FFPlayer *ffp = (FFPlayer *)arg;
    int       ret = 0;

    if (ffp->node_vdec) {
        ret = ffpipenode_run_sync(ffp->node_vdec);
    }
    return ret;
}

int ffpipenode_run_sync(IJKFF_Pipenode *node)
{
    return node->func_run_sync(node);
}
```

Both constructors assign node->func_run_sync = func_run_sync, but the two functions are different: for software decoding it is the func_run_sync in ffpipenode_ffplay_vdec.c, and for hardware decoding the one in ffpipenode_ios_videotoolbox_vdec.m. This is classic C function-pointer dispatch; the Linux kernel uses the same pattern everywhere, and what we see here is just a small instance of it.
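The pipenode trick is easy to see in isolation: both constructors fill in the same function-pointer slot, and the caller never needs to know which backend it got. A stripped-down, runnable illustration (the names here are hypothetical, not the real IJKFF_Pipenode layout):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct Pipenode {
    int (*func_run_sync)(struct Pipenode *node);  /* backend-specific entry point */
} Pipenode;

static int run_sync_ffplay(Pipenode *node)       { (void)node; puts("software decode loop"); return 0; }
static int run_sync_videotoolbox(Pipenode *node) { (void)node; puts("hardware decode loop"); return 0; }

static Pipenode *create_node(int use_hw) {
    Pipenode *node = calloc(1, sizeof(*node));
    if (!node)
        return NULL;
    /* same slot, different target: this is the whole dispatch mechanism */
    node->func_run_sync = use_hw ? run_sync_videotoolbox : run_sync_ffplay;
    return node;
}

int main(void) {
    Pipenode *node = create_node(1);
    node->func_run_sync(node);   /* caller is oblivious to the backend */
    free(node);
    return 0;
}
```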
Let's analyze software decoding first:

```c
static int func_run_sync(IJKFF_Pipenode *node)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    return ffp_video_thread(opaque->ffp);
}

int ffp_video_thread(FFPlayer *ffp)
{
    return ffplay_video_thread(ffp);
}

static int ffplay_video_thread(void *arg)
{
    ......
    for (;;) {
        // get_video_frame calls decoder_decode_frame, which does the actual decoding
        ret = get_video_frame(ffp, frame);
        ......
        ret = av_buffersrc_add_frame(filt_in, frame);
        ......
        // push the decoded frame into pictq
        ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
        ......
    }
}

// push a decoded frame into pictq
static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    ......
    // FIXME: set swscale options
    if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
        exit(1);
    }
    ......
    frame_queue_push(&is->pictq);
    ......
}

// fill the overlay with the whole frame
int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
    if (!overlay || !overlay->func_fill_frame)
        return -1;
    return overlay->func_fill_frame(overlay, frame);
}

// fill in the image data
static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
    ......
    // scale the image and convert its pixel format; sws_scale can apply
    // several different algorithms to the image
    sws_scale(opaque->img_convert_ctx, (const uint8_t**) frame->data, frame->linesize,
              0, frame->height, swscale_dst_pic.data, swscale_dst_pic.linesize);
    ......
}
```
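For context, here is what a standalone libswscale conversion looks like: allocate a context, then call sws_scale per frame. This is a generic FFmpeg usage sketch, not ijkplayer's func_fill_frame; the choice of YUV420P output and same-size scaling are assumptions for the example:

```c
#include <libswscale/swscale.h>
#include <libavutil/frame.h>

/* Convert a decoded frame to YUV420P at the same size.
 * Returns 0 on success, <0 on error. */
static int convert_to_yuv420p(const AVFrame *src, AVFrame *dst)
{
    struct SwsContext *ctx = sws_getContext(
        src->width, src->height, src->format,          /* input  */
        src->width, src->height, AV_PIX_FMT_YUV420P,   /* output */
        SWS_BILINEAR, NULL, NULL, NULL);               /* scaling algorithm */
    if (!ctx)
        return -1;

    dst->width  = src->width;
    dst->height = src->height;
    dst->format = AV_PIX_FMT_YUV420P;
    if (av_frame_get_buffer(dst, 0) < 0) {             /* allocate dst planes */
        sws_freeContext(ctx);
        return -1;
    }

    sws_scale(ctx, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, dst->data, dst->linesize);

    sws_freeContext(ctx);   /* real code would cache the context across frames */
    return 0;
}
```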
Now let's analyze hardware decoding. The constructor ffpipenode_create_video_decoder_from_ios_videotoolbox was shown in full above; for H.264 it creates either an asynchronous or a synchronous VideoToolbox context. Here we follow the synchronous case:

```c
// a synchronous decoder is created here
opaque->context = Ijk_VideoToolbox_Sync_Create(ffp, opaque->avctx);

Ijk_VideoToolBox *Ijk_VideoToolbox_Sync_Create(FFPlayer* ffp, AVCodecContext* ic)
{
    return Ijk_VideoToolbox_CreateInternal(0, ffp, ic);
}

inline static Ijk_VideoToolBox *Ijk_VideoToolbox_CreateInternal(int async, FFPlayer* ffp, AVCodecContext* ic)
{
    Ijk_VideoToolBox *vtb = (Ijk_VideoToolBox*) mallocz(sizeof(Ijk_VideoToolBox));
    if (!vtb)
        return NULL;

    if (async) {
        // asynchronous decoder
        vtb->opaque = videotoolbox_async_create(ffp, ic);
        vtb->decode_frame = videotoolbox_async_decode_frame;
        vtb->free = videotoolbox_async_free;
    } else {
        // synchronous decoder
        vtb->opaque = videotoolbox_sync_create(ffp, ic);
        vtb->decode_frame = videotoolbox_sync_decode_frame;
        vtb->free = videotoolbox_sync_free;
    }

    if (!vtb->opaque) {
        freep((void **)&vtb);
        return NULL;
    }
    return vtb;
}

// create the synchronous hardware decoder
Ijk_VideoToolBox_Opaque* videotoolbox_sync_create(FFPlayer* ffp, AVCodecContext* avctx)
{
    ......
    context_vtb->vt_session = vtbsession_create(context_vtb);
    ......
}
```

vtbsession_create builds the actual hardware decode session. Note the line `outputCallback.decompressionOutputCallback = VTDecoderCallback`: it registers the callback that will receive the decoded frames.

```objc
static VTDecompressionSessionRef vtbsession_create(Ijk_VideoToolBox_Opaque* context)
{
    FFPlayer *ffp = context->ffp;
    int       ret = 0;
    int       width  = context->codecpar->width;
    int       height = context->codecpar->height;

    VTDecompressionSessionRef vt_session = NULL;
    CFMutableDictionaryRef destinationPixelBufferAttributes;
    VTDecompressionOutputCallbackRecord outputCallback;
    OSStatus status;

    ret = vtbformat_init(&context->fmt_desc, context->codecpar);

    // clamp the output width if vtb_max_frame_width is set, preserving the aspect ratio
    if (ffp->vtb_max_frame_width > 0 && width > ffp->vtb_max_frame_width) {
        double w_scaler = (float)ffp->vtb_max_frame_width / width;
        width = ffp->vtb_max_frame_width;
        height = height * w_scaler;
    }
    ALOGI("after scale width %d height %d \n", width, height);

    destinationPixelBufferAttributes = CFDictionaryCreateMutable(
                                           NULL,
                                           0,
                                           &kCFTypeDictionaryKeyCallBacks,
                                           &kCFTypeDictionaryValueCallBacks);
    CFDictionarySetSInt32(destinationPixelBufferAttributes,
                          kCVPixelBufferPixelFormatTypeKey, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
    CFDictionarySetSInt32(destinationPixelBufferAttributes,
                          kCVPixelBufferWidthKey, width);
    CFDictionarySetSInt32(destinationPixelBufferAttributes,
                          kCVPixelBufferHeightKey, height);
    CFDictionarySetBoolean(destinationPixelBufferAttributes,
                          kCVPixelBufferOpenGLESCompatibilityKey, YES);

    // register the decoded-frame callback
    outputCallback.decompressionOutputCallback = VTDecoderCallback;
    outputCallback.decompressionOutputRefCon = context;
    status = VTDecompressionSessionCreate(
                 kCFAllocatorDefault,
                 context->fmt_desc.fmt_desc,
                 NULL,
                 destinationPixelBufferAttributes,
                 &outputCallback,
                 &vt_session);

    if (status != noErr) {
        NSError* error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
        NSLog(@"Error %@", [error description]);
        ALOGI("%s - failed with status = (%d)", __FUNCTION__, (int)status);
    }
    CFRelease(destinationPixelBufferAttributes);

    memset(&context->sample_info, 0, sizeof(struct sample_info));
    return vt_session;
}
```
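The vtbformat_init call above produces context->fmt_desc, the format description the session is created with. For H.264 that description is typically built from the stream's SPS/PPS parameter sets. Below is a hedged sketch of that step using Apple's C API; the helper name and the assumption that SPS/PPS have already been extracted from the codec extradata are mine, not ijkplayer's code:

```c
#include <CoreMedia/CoreMedia.h>

/* Build a format description from raw SPS/PPS buffers (without start codes).
 * Returns NULL on failure. */
static CMFormatDescriptionRef make_h264_format_desc(
        const uint8_t *sps, size_t sps_len,
        const uint8_t *pps, size_t pps_len)
{
    const uint8_t *param_sets[2]  = { sps, pps };
    const size_t   param_sizes[2] = { sps_len, pps_len };
    CMFormatDescriptionRef fmt_desc = NULL;

    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
        kCFAllocatorDefault,
        2,                 /* parameter set count: SPS + PPS           */
        param_sets,
        param_sizes,
        4,                 /* NAL unit length-field size (AVCC layout) */
        &fmt_desc);

    return (status == noErr) ? fmt_desc : NULL;
}
```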
As seen in Ijk_VideoToolbox_CreateInternal, the synchronous path sets the decode function pointer:

```c
vtb->decode_frame = videotoolbox_sync_decode_frame;
```

When hardware decoding is active, func_run_sync (the one in ffpipenode_ios_videotoolbox_vdec.m) runs the VideoToolbox decode loop:

```objc
static int func_run_sync(IJKFF_Pipenode *node)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    int ret = videotoolbox_video_thread(opaque);

    if (opaque->context) {
        opaque->context->free(opaque->context->opaque);
        free(opaque->context);
        opaque->context = NULL;
    }
    return ret;
}

int videotoolbox_video_thread(void *arg)
{
    IJKFF_Pipenode_Opaque* opaque = (IJKFF_Pipenode_Opaque*) arg;
    FFPlayer *ffp = opaque->ffp;
    VideoState *is = ffp->is;
    Decoder   *d = &is->viddec;
    int ret = 0;

    for (;;) {
        if (is->abort_request || d->queue->abort_request) {
            return -1;
        }
        @autoreleasepool {
            // decode_frame points at videotoolbox_sync_decode_frame here
            ret = opaque->context->decode_frame(opaque->context->opaque);
        }
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;
        if (ret < 0)      // dead code: ret < 0 was already handled two lines up
            goto the_end;
    }
the_end:
    return 0;
}

// synchronous hardware decode of one frame
int videotoolbox_sync_decode_frame(Ijk_VideoToolBox_Opaque* context)
{
    ......
    // fetch an undecoded AVPacket from the queue
    if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
        AVPacket pkt;
        do {
            if (d->queue->nb_packets == 0)
                SDL_CondSignal(d->empty_queue_cond);
            ffp_video_statistic_l(ffp);
            if (ffp_packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                return -1;
            if (ffp_is_flush_packet(&pkt)) {
                avcodec_flush_buffers(d->avctx);
                context->refresh_request = true;
                context->serial += 1;
                d->finished = 0;
                ALOGI("flushed last keyframe pts %lld \n", d->pkt.pts);
                d->next_pts = d->start_pts;
                d->next_pts_tb = d->start_pts_tb;
            }
        } while (ffp_is_flush_packet(&pkt) || d->queue->serial != d->pkt_serial);

        av_packet_split_side_data(&pkt);
        av_packet_unref(&d->pkt);
        d->pkt_temp = d->pkt = pkt;
        d->packet_pending = 1;
    }
    // decode it
    ret = decode_video(context, d->avctx, &d->pkt_temp, &got_frame);
    ......
}

static int decode_video(Ijk_VideoToolBox_Opaque* context, AVCodecContext *avctx, AVPacket *avpkt, int* got_picture_ptr)
{
    ......
    // When the app enters the background on iOS, the VTDecompressionSession becomes
    // invalid and the decoder must be rebuilt; setting context->refresh_session = true
    // triggers that rebuild.
    if (context->refresh_session) {
        ret = 0;
        vtbsession_destroy(context);
        memset(&context->sample_info, 0, sizeof(struct sample_info));

        // recreate the hardware decode session
        context->vt_session = vtbsession_create(context);
        if (!context->vt_session)
            return -1;

        if ((context->m_buffer_deep > 0) &&
            ff_avpacket_i_or_idr(&context->m_buffer_packet[0], context->idr_based_identified) == true) {
            // replay the buffered packets, which start at an I/IDR frame
            for (int i = 0; i < context->m_buffer_deep; i++) {
                AVPacket* pkt = &context->m_buffer_packet[i];
                ret = decode_video_internal(context, avctx, pkt, got_picture_ptr);
            }
        } else {
            context->recovery_drop_packet = true;
            ret = -1;
            ALOGE("recovery error!!!!\n");
        }
        context->refresh_session = false;
        return ret;
    }
    // normal path: decode_video_internal does the actual work
    return decode_video_internal(context, avctx, avpkt, got_picture_ptr);
}
```
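The recovery branch only works because the decoder kept a copy of every packet since the most recent I/IDR frame (m_buffer_packet/m_buffer_deep), so the rebuilt session can be re-fed a sequence that starts at a keyframe. A minimal sketch of that buffering policy follows, with illustrative names; ijkplayer's real bookkeeping differs in detail. The RecoveryBuffer is assumed zero-initialized before first use:

```c
#include <libavcodec/avcodec.h>

#define RECOVERY_MAX 64

typedef struct {
    AVPacket pkts[RECOVERY_MAX];
    int      depth;
} RecoveryBuffer;

/* Keep every packet since the last keyframe; restart the buffer on each keyframe. */
static void recovery_push(RecoveryBuffer *rb, const AVPacket *pkt)
{
    if ((pkt->flags & AV_PKT_FLAG_KEY) || rb->depth >= RECOVERY_MAX) {
        for (int i = 0; i < rb->depth; i++)
            av_packet_unref(&rb->pkts[i]);      /* drop the old GOP */
        rb->depth = 0;
        if (!(pkt->flags & AV_PKT_FLAG_KEY))
            return;                             /* overflow mid-GOP: nothing decodable to keep */
    }
    av_packet_ref(&rb->pkts[rb->depth++], pkt); /* shared-ownership copy */
}
```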
decode_video_internal submits the packet to the session; the decoded output arrives through the registered callback:

```objc
static int decode_video_internal(Ijk_VideoToolBox_Opaque* context, AVCodecContext *avctx, const AVPacket *avpkt, int* got_picture_ptr)
{
    ......
    // Decode. The result is delivered through VTDecoderCallback, which was registered
    // in vtbsession_create via outputCallback.decompressionOutputCallback.
    status = VTDecompressionSessionDecodeFrame(context->vt_session, sample_buff, decoder_flags, (void*)sample_info, 0);
    if (status == noErr) {
        if (ffp->is->videoq.abort_request)
            goto failed;
    }
    if (status != 0) {
        ALOGE("decodeFrame %d %s\n", (int)status, vtb_get_error_string(status));
        // the session became invalid (e.g. the app went to the background):
        // flag it so decode_video rebuilds the decoder
        if (status == kVTInvalidSessionErr) {
            context->refresh_session = true;
        }
        if (status == kVTVideoDecoderMalfunctionErr) {
            context->recovery_drop_packet = true;
            context->refresh_session = true;
        }
        goto failed;
    }
    ......
}

static void VTDecoderCallback(void *decompressionOutputRefCon,
                              void *sourceFrameRefCon,
                              OSStatus status,
                              VTDecodeInfoFlags infoFlags,
                              CVImageBufferRef imageBuffer,
                              CMTime presentationTimeStamp,
                              CMTime presentationDuration)
{
    ......
    if (ctx->new_seg_flag) {
        ALOGI("new seg process!!!!");
        // drain the reorder queue at a segment boundary
        while (ctx->m_queue_depth > 0) {
            QueuePicture(ctx);
        }
        ctx->new_seg_flag = false;
    }
    ......
    if ((ctx->m_queue_depth > ctx->fmt_desc.max_ref_frames)) {
        QueuePicture(ctx);
    }
    ......
}

static void QueuePicture(Ijk_VideoToolBox_Opaque* ctx)
{
    ......
    ffp_queue_picture(ctx->ffp, &picture, pts, duration, 0, ctx->ffp->is->viddec.pkt_serial);
    ......
}

int ffp_queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    return queue_picture(ffp, src_frame, pts, duration, pos, serial);
}

// push the decoded frame into pictq (frame_queue_push(&is->pictq))
static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    ......
    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
    ......
        frame_queue_push(&is->pictq);
    ......
    }
    return 0;
}
```
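Inside VTDecoderCallback the decoded frame arrives as a CVImageBufferRef, which here is an NV12 CVPixelBuffer (per the kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange attribute set in vtbsession_create). Reading its planes requires the lock/unlock dance shown below; this is a generic CoreVideo sketch, not ijkplayer's copy routine:

```c
#include <CoreVideo/CoreVideo.h>

/* Visit both NV12 planes of a decoded pixel buffer.
 * plane 0: Y (full resolution), plane 1: interleaved CbCr (half resolution). */
static void read_nv12_planes(CVPixelBufferRef pixel_buffer)
{
    CVPixelBufferLockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);

    for (size_t plane = 0; plane < 2; plane++) {
        uint8_t *base   = CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, plane);
        size_t   stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, plane);
        size_t   rows   = CVPixelBufferGetHeightOfPlane(pixel_buffer, plane);
        /* copy `rows` rows of `stride` bytes from `base` into your own frame here */
        (void)base; (void)stride; (void)rows;
    }

    CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
}
```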
Video rendering (the display side) takes decoded frames back out of pictq. SDL_VoutIos_CreateForGLES2 creates the iOS video output; SDL_VoutDummy_Create is a dummy output and is not used on iOS.

```objc
IjkMediaPlayer *ijkmp_ios_create(int (*msg_loop)(void*))
{
    IjkMediaPlayer *mp = ijkmp_create(msg_loop);
    if (!mp)
        goto fail;

    // create the iOS video output (render target)
    mp->ffplayer->vout = SDL_VoutIos_CreateForGLES2();
    if (!mp->ffplayer->vout)
        goto fail;

    mp->ffplayer->pipeline = ffpipeline_create_from_ios(mp->ffplayer);
    if (!mp->ffplayer->pipeline)
        goto fail;

    return mp;

fail:
    ijkmp_dec_ref_p(&mp);
    return NULL;
}

SDL_Vout *SDL_VoutIos_CreateForGLES2()
{
    SDL_Vout *vout = SDL_Vout_CreateInternal(sizeof(SDL_Vout_Opaque));
    if (!vout)
        return NULL;

    SDL_Vout_Opaque *opaque = vout->opaque;
    opaque->gl_view = nil;
    vout->create_overlay = vout_create_overlay;
    vout->free_l = vout_free_l;
    vout->display_overlay = vout_display_overlay;

    return vout;
}
```

Here the vout->display_overlay function pointer points at vout_display_overlay in ijksdl_vout_ios_gles2.m:

```objc
static int vout_display_overlay(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
    @autoreleasepool {
        SDL_LockMutex(vout->mutex);
        int retval = vout_display_overlay_l(vout, overlay);
        SDL_UnlockMutex(vout->mutex);
        return retval;
    }
}

// decoded video ultimately reaches this function for display
static int vout_display_overlay_l(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
    SDL_Vout_Opaque *opaque = vout->opaque;
    IJKSDLGLView *gl_view = opaque->gl_view;

    if (!gl_view) {
        ALOGE("vout_display_overlay_l: NULL gl_view\n");
        return -1;
    }
    if (!overlay) {
        ALOGE("vout_display_overlay_l: NULL overlay\n");
        return -1;
    }
    if (overlay->w <= 0 || overlay->h <= 0) {
        ALOGE("vout_display_overlay_l: invalid overlay dimensions(%d, %d)\n", overlay->w, overlay->h);
        return -1;
    }

    [gl_view display:overlay];
    return 0;
}
```

From the analysis above, ijkplayer's video pipeline on iOS is clear: decoded YUV images are rendered with OpenGL ES, the rendering thread is video_refresh_thread, and the method that finally draws the image is video_image_display2. In decoder_decode_frame, a frame's worth of data is taken from the undecoded video queue and handed to the decoder; ffplay_video_thread then pushes the decoded result into pictq. Once decoded data lands in pictq, the rendering thread's video_image_display2 function takes over:

```c
static void video_image_display2(FFPlayer *ffp)
{
    ......
    vp = frame_queue_peek_last(&is->pictq);
    ......
    SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
    ......
}
```

video_image_display2 takes the most recent decoded frame from pictq and hands it to SDL, which renders it through OpenGL ES.
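The consumer side can be pictured as a loop that peeks the next frame, waits until its pts is due, then displays it. Below is a toy, self-contained sketch of that pacing idea, with placeholder frame data and a printf standing in for the vout; the real video_refresh_thread also handles A/V sync, frame dropping, and subtitles:

```c
#include <stdio.h>
#include <time.h>
#include <unistd.h>

typedef struct { double pts; int id; } Frame;

/* stand-in for pictq: three frames at 25 fps */
static Frame frames[3] = { {0.00, 0}, {0.04, 1}, {0.08, 2} };
static int   rindex    = 0;

static Frame *frame_queue_peek(void) { return rindex < 3 ? &frames[rindex] : NULL; }
static void   frame_queue_next(void) { rindex++; }

static double now_sec(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
    double start = now_sec();
    for (;;) {
        Frame *vp = frame_queue_peek();
        if (!vp) break;                         /* demo ends when the queue drains */

        double remaining = (start + vp->pts) - now_sec();
        if (remaining > 0) {                    /* frame not due yet: wait */
            usleep((useconds_t)(remaining * 1e6));
            continue;
        }
        printf("display frame %d at pts %.2f\n", vp->id, vp->pts);  /* stand-in for the vout */
        frame_queue_next();
    }
    return 0;
}
```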

