Data flow among the buffers used when avformat_find_stream_info reads data


Introduction

When FFmpeg's data-reading functions are called, it is worth understanding how raw data is read into memory and which structure it moves to next, because FFmpeg is, at its core, a data-processing library. Knowing only which APIs to call, without knowing how the data flows between buffers, makes any fine-grained processing difficult.

Data flow while analyzing stream info

Where the data is stored

The buffers involved in the diagram below are:
AVIOContext: AVFormatContext->pb; raw data always enters here first.
AVFormatInternal: AVFormatContext->internal

struct AVFormatInternal {
    /**
     * Number of streams relevant for interleaving.
     * Muxing only.
     */
    int nb_interleaved_streams;

    /**
     * This buffer is only needed when packets were already buffered but
     * not decoded, for example to get the codec parameters in MPEG
     * streams.
     */
    struct AVPacketList *packet_buffer;
    struct AVPacketList *packet_buffer_end;

    /* av_seek_frame() support */
    int64_t data_offset; /**< offset of the first packet */

    /**
     * Raw packets from the demuxer, prior to parsing and decoding.
     * This buffer is used for buffering packets until the codec can
     * be identified, as parsing cannot be done without knowing the
     * codec.
     */
    struct AVPacketList *raw_packet_buffer;
    struct AVPacketList *raw_packet_buffer_end;

    /**
     * Packets split by the parser get queued here.
     */
    struct AVPacketList *parse_queue;
    struct AVPacketList *parse_queue_end;

    /**
     * Remaining size available for raw_packet_buffer, in bytes.
     */
#define RAW_PACKET_BUFFER_SIZE 2500000
    int raw_packet_buffer_remaining_size;

    /**
     * Offset to remap timestamps to be non-negative.
     * Expressed in timebase units.
     * @see AVStream.mux_ts_offset
     */
    int64_t offset;

    /**
     * Timebase for the timestamp offset.
     */
    AVRational offset_timebase;

#if FF_API_COMPUTE_PKT_FIELDS2
    int missing_ts_warning;
#endif

    int inject_global_side_data;

    int avoid_negative_ts_use_pts;

    /**
     * Whether or not a header has already been written
     */
    int header_written;
    int write_header_ret;
};

raw_packet_buffer and raw_packet_buffer_end: used in ff_read_packet(). I have not fully analyzed this one yet.
parse_queue and parse_queue_end: in read_frame_internal(), the data that was read may contain more than one packet, and the function cannot return every time a single packet is parsed out, so this queue holds the packets that the parser has already split off.
packet_buffer and packet_buffer_end: every packet that read_frame_internal() returns (after pulling it from parse_queue) is appended to this list, so that the data is not lost and remains available for the later transcoding stage; when transcoding reads data, it first checks whether this list already holds packets.

All of these lists hold undecoded data. At the end of the stream analysis FFmpeg tries to decode some of it, and the decoded frames are then freed.
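All three queues are plain singly linked lists of AVPacketList nodes with a head and a tail pointer, so appending is O(1). The following is a minimal sketch of that bookkeeping, a simplified stand-in for libavformat's internal add_to_pktbuf(); the helper name append_to_pktlist is made up for illustration:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Append a new reference to *pkt at the tail of the list described by
 * (*head, *tail). Mirrors, in simplified form, what the internal
 * add_to_pktbuf() does for raw_packet_buffer, parse_queue and packet_buffer. */
static int append_to_pktlist(AVPacketList **head, AVPacketList **tail,
                             const AVPacket *pkt)
{
    AVPacketList *node = av_mallocz(sizeof(*node));
    int ret;

    if (!node)
        return AVERROR(ENOMEM);

    ret = av_packet_ref(&node->pkt, pkt);   /* share the refcounted buffer */
    if (ret < 0) {
        av_free(node);
        return ret;
    }

    if (*tail)
        (*tail)->next = node;   /* append at the end */
    else
        *head = node;           /* first element */
    *tail = node;
    return 0;
}

ff_read_packet(), parse_packet() and avformat_find_stream_info() all rely on this pattern, applied to raw_packet_buffer, parse_queue and packet_buffer respectively.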

Data flow diagram

(The original post shows a diagram of the data flow here; the image is not reproduced in this copy.)

The functions involved

The functions below are analyzed one by one, following the data from the moment it is first read in until it has been decoded successfully.

Data flow inside av_probe_input_buffer

A separate article, "Data flow inside av_probe_input_buffer", analyzes how data is buffered while the container format is being probed. Probing is the first analysis step applied to the input stream; when it finishes, the probed bytes are left in the AVIOContext *pb, between buf_ptr and buf_end, so that they can be read again later.
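For reference, this is roughly what the probing step looks like when driven directly through the public API (a sketch against the FFmpeg 3.x-era API; error handling is minimal and /tmp/input.ts is just an example path):

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVIOContext *pb = NULL;
    AVInputFormat *fmt = NULL;

    av_register_all();                       /* still required on FFmpeg 3.x */
    if (avio_open(&pb, "/tmp/input.ts", AVIO_FLAG_READ) < 0)
        return 1;

    /* Reads and probes the beginning of the stream; the probed bytes stay
     * buffered in pb between pb->buf_ptr and pb->buf_end, as described above,
     * so the demuxer can read them again. */
    if (av_probe_input_buffer2(pb, &fmt, "/tmp/input.ts", NULL, 0, 0) >= 0)
        printf("detected format: %s, %d bytes still buffered\n",
               fmt->name, (int)(pb->buf_end - pb->buf_ptr));

    avio_closep(&pb);
    return 0;
}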

avformat_find_stream_info

When the analysis finishes, all of the packets that were read end up in packet_buffer / packet_buffer_end, where they remain available to the subsequent transcoding flow. A typical caller-side sequence is sketched below, followed by the function's source.
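From the caller's point of view this buffering is invisible: the packets cached by avformat_find_stream_info() are simply the first ones handed back by av_read_frame(). A minimal sketch (FFmpeg 3.x API; the file name is just an example):

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    av_register_all();                       /* still required on FFmpeg 3.x */
    if (avformat_open_input(&ic, "/tmp/input.ts", NULL, NULL) < 0)
        return 1;

    /* Fills ic->internal->packet_buffer while probing codec parameters. */
    if (avformat_find_stream_info(ic, NULL) < 0) {
        avformat_close_input(&ic);
        return 1;
    }

    /* The first packets come straight out of packet_buffer, without
     * touching the input again. */
    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d, size %d\n", pkt.stream_index, pkt.size);
        av_packet_unref(&pkt);
    }

    avformat_close_input(&ic);
    return 0;
}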

int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options){    int i, count = 0, ret = 0, j;    int64_t read_size;    AVStream *st;    AVCodecContext *avctx;    AVPacket pkt1, *pkt;    int64_t old_offset  = avio_tell(ic->pb);    // new streams might appear, no options for those    int orig_nb_streams = ic->nb_streams;    int flush_codecs;    int64_t max_analyze_duration = ic->max_analyze_duration;    int64_t max_stream_analyze_duration;    int64_t max_subtitle_analyze_duration;    int64_t probesize = ic->probesize;    int eof_reached = 0;    flush_codecs = probesize > 0;    av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN);    max_stream_analyze_duration = max_analyze_duration;    max_subtitle_analyze_duration = max_analyze_duration;    if (!max_analyze_duration) {        max_stream_analyze_duration =        max_analyze_duration        = 5*AV_TIME_BASE;        max_subtitle_analyze_duration = 30*AV_TIME_BASE;        if (!strcmp(ic->iformat->name, "flv"))            max_stream_analyze_duration = 90*AV_TIME_BASE;        if (!strcmp(ic->iformat->name, "mpeg") || !strcmp(ic->iformat->name, "mpegts"))            max_stream_analyze_duration = 7*AV_TIME_BASE;    }    if (ic->pb)        av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d nb_streams:%d\n",               avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, ic->nb_streams);    for (i = 0; i < ic->nb_streams; i++) {        const AVCodec *codec;        AVDictionary *thread_opt = NULL;        st = ic->streams[i];        avctx = st->internal->avctx;        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||            st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {/*            if (!st->time_base.num)                st->time_base = */            if (!avctx->time_base.num)                avctx->time_base = st->time_base;        }        // only for the split stuff        if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE) && st->request_probe <= 0) {            st->parser = av_parser_init(st->codecpar->codec_id);            if (st->parser) {                if (st->need_parsing == AVSTREAM_PARSE_HEADERS) {                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;                } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {                    st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;                }            } else if (st->need_parsing) {                av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "                       "%s, packets or times may be invalid.\n",                       avcodec_get_name(st->codecpar->codec_id));            }        }        /* check if the caller has overridden the codec id */#if FF_API_LAVF_AVCTXFF_DISABLE_DEPRECATION_WARNINGS        if (st->codec->codec_id != st->internal->orig_codec_id) {            st->codecpar->codec_id   = st->codec->codec_id;            st->codecpar->codec_type = st->codec->codec_type;            st->internal->orig_codec_id = st->codec->codec_id;        }FF_ENABLE_DEPRECATION_WARNINGS#endif        if (st->codecpar->codec_id != st->internal->orig_codec_id)            st->internal->orig_codec_id = st->codecpar->codec_id;        ret = avcodec_parameters_to_context(avctx, st->codecpar);        if (ret < 0)            goto find_stream_info_err;        if (st->request_probe <= 0)            st->internal->avctx_inited = 1;        codec = find_decoder(ic, st, st->codecpar->codec_id);        /* Force thread count to 1 since the H.264 decoder will not extract         
* SPS and PPS to extradata during multi-threaded decoding. */        av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);        if (ic->codec_whitelist)            av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0);        /* Ensure that subtitle_header is properly set. */        if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE            && codec && !avctx->codec) {            if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0)                av_log(ic, AV_LOG_WARNING,                       "Failed to open codec in av_find_stream_info\n");        }        // Try to just open decoders, in case this is enough to get parameters.        if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {            if (codec && !avctx->codec)                if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0)                    av_log(ic, AV_LOG_WARNING,                           "Failed to open codec in av_find_stream_info\n");        }        if (!options)            av_dict_free(&thread_opt);    }    for (i = 0; i < ic->nb_streams; i++) {#if FF_API_R_FRAME_RATE        ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;#endif        ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;        ic->streams[i]->info->fps_last_dts  = AV_NOPTS_VALUE;    }    read_size = 0;    for (;;) {        int analyzed_all_streams;        if (ff_check_interrupt(&ic->interrupt_callback)) {            ret = AVERROR_EXIT;            av_log(ic, AV_LOG_DEBUG, "interrupted\n");            break;        }        /* check if one codec still needs to be handled */        for (i = 0; i < ic->nb_streams; i++) {            int fps_analyze_framecount = 20;            st = ic->streams[i];            if (!has_codec_parameters(st, NULL))                break;            /* If the timebase is coarse (like the usual millisecond precision             * of mkv), we need to analyze more frames to reliably arrive at             * the correct fps. */            if (av_q2d(st->time_base) > 0.0005)                fps_analyze_framecount *= 2;            if (!tb_unreliable(st->internal->avctx))                fps_analyze_framecount = 0;            if (ic->fps_probe_size >= 0)                fps_analyze_framecount = ic->fps_probe_size;            if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)                fps_analyze_framecount = 0;            /* variable fps and no guess at the real fps */            if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&                st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {                int count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ?                    st->info->codec_info_duration_fields/2 :                    st->info->duration_count;                if (count < fps_analyze_framecount)                    break;            }            if (st->parser && st->parser->parser->split &&                !st->codecpar->extradata)                break;            if (st->first_dts == AV_NOPTS_VALUE &&                !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&                st->codec_info_nb_frames < ((st->disposition & AV_DISPOSITION_ATTACHED_PIC) ? 
1 : ic->max_ts_probe) &&                (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||                 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))                break;        }        analyzed_all_streams = 0;        if (i == ic->nb_streams) {            analyzed_all_streams = 1;            /* NOTE: If the format has no header, then we need to read some             * packets to get most of the streams, so we cannot stop here. */            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {                /* If we found the info for all the codecs, we can stop. */                ret = count;                av_log(ic, AV_LOG_DEBUG, "All info found\n");                flush_codecs = 0;                break;            }        }        /* We did not get all the codec info, but we read too much data. */        if (read_size >= probesize) {            ret = count;            av_log(ic, AV_LOG_DEBUG,                   "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);            for (i = 0; i < ic->nb_streams; i++)                if (!ic->streams[i]->r_frame_rate.num &&                    ic->streams[i]->info->duration_count <= 1 &&                    ic->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&                    strcmp(ic->iformat->name, "image2"))                    av_log(ic, AV_LOG_WARNING,                           "Stream #%d: not enough frames to estimate rate; "                           "consider increasing probesize\n", i);            break;        }        /* NOTE: A new stream can be added there if no header in file         * (AVFMTCTX_NOHEADER). */        ret = read_frame_internal(ic, &pkt1);        if (ret == AVERROR(EAGAIN))            continue;        if (ret < 0) {            /* EOF or error*/            eof_reached = 1;            break;        }        pkt = &pkt1;        if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {            ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,                                &ic->internal->packet_buffer_end, 0);            if (ret < 0)                goto find_stream_info_err;        }        st = ic->streams[pkt->stream_index];        if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))            read_size += pkt->size;        avctx = st->internal->avctx;        if (!st->internal->avctx_inited) {            ret = avcodec_parameters_to_context(avctx, st->codecpar);            if (ret < 0)                goto find_stream_info_err;            st->internal->avctx_inited = 1;        }        if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {            /* check for non-increasing dts */            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&                st->info->fps_last_dts >= pkt->dts) {                av_log(ic, AV_LOG_DEBUG,                       "Non-increasing DTS in stream %d: packet %d with DTS "                       "%"PRId64", packet %d with DTS %"PRId64"\n",                       st->index, st->info->fps_last_dts_idx,                       st->info->fps_last_dts, st->codec_info_nb_frames,                       pkt->dts);                st->info->fps_first_dts =                st->info->fps_last_dts  = AV_NOPTS_VALUE;            }            /* Check for a discontinuity in dts. If the difference in dts             * is more than 1000 times the average packet duration in the             * sequence, we treat it as a discontinuity. 
*/            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&                st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&                (pkt->dts - st->info->fps_last_dts) / 1000 >                (st->info->fps_last_dts     - st->info->fps_first_dts) /                (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {                av_log(ic, AV_LOG_WARNING,                       "DTS discontinuity in stream %d: packet %d with DTS "                       "%"PRId64", packet %d with DTS %"PRId64"\n",                       st->index, st->info->fps_last_dts_idx,                       st->info->fps_last_dts, st->codec_info_nb_frames,                       pkt->dts);                st->info->fps_first_dts =                st->info->fps_last_dts  = AV_NOPTS_VALUE;            }            /* update stored dts values */            if (st->info->fps_first_dts == AV_NOPTS_VALUE) {                st->info->fps_first_dts     = pkt->dts;                st->info->fps_first_dts_idx = st->codec_info_nb_frames;            }            st->info->fps_last_dts     = pkt->dts;            st->info->fps_last_dts_idx = st->codec_info_nb_frames;        }        if (st->codec_info_nb_frames>1) {            int64_t t = 0;            int64_t limit;            if (st->time_base.den > 0)                t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);            if (st->avg_frame_rate.num > 0)                t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));            if (   t == 0                && st->codec_info_nb_frames>30                && st->info->fps_first_dts != AV_NOPTS_VALUE                && st->info->fps_last_dts  != AV_NOPTS_VALUE)                t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));            if (analyzed_all_streams)                                limit = max_analyze_duration;            else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) limit = max_subtitle_analyze_duration;            else                                                     limit = max_stream_analyze_duration;            if (t >= limit) {                av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds st:%d\n",                       limit,                       t, pkt->stream_index);                if (ic->flags & AVFMT_FLAG_NOBUFFER)                    av_packet_unref(pkt);                break;            }            if (pkt->duration) {                if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE && pkt->pts != AV_NOPTS_VALUE && pkt->pts >= st->start_time) {                    st->info->codec_info_duration = FFMIN(pkt->pts - st->start_time, st->info->codec_info_duration + pkt->duration);                } else                    st->info->codec_info_duration += pkt->duration;                st->info->codec_info_duration_fields += st->parser && st->need_parsing && avctx->ticks_per_frame ==2 ? 
st->parser->repeat_pict + 1 : 2;            }        }#if FF_API_R_FRAME_RATE        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)            ff_rfps_add_frame(ic, st, pkt->dts);#endif        if (st->parser && st->parser->parser->split && !avctx->extradata) {            int i = st->parser->parser->split(avctx, pkt->data, pkt->size);            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {                avctx->extradata_size = i;                avctx->extradata      = av_mallocz(avctx->extradata_size +                                                   AV_INPUT_BUFFER_PADDING_SIZE);                if (!avctx->extradata)                    return AVERROR(ENOMEM);                memcpy(avctx->extradata, pkt->data,                       avctx->extradata_size);            }        }        /* If still no information, we try to open the codec and to         * decompress the frame. We try to avoid that in most cases as         * it takes longer and uses more memory. For MPEG-4, we need to         * decompress for QuickTime.         *         * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at         * least one frame of codec data, this makes sure the codec initializes         * the channel configuration and does not only trust the values from         * the container. */        try_decode_frame(ic, st, pkt,                         (options && i < orig_nb_streams) ? &options[i] : NULL);        if (ic->flags & AVFMT_FLAG_NOBUFFER)            av_packet_unref(pkt);        st->codec_info_nb_frames++;        count++;    }    if (eof_reached) {        int stream_index;        for (stream_index = 0; stream_index < ic->nb_streams; stream_index++) {            st = ic->streams[stream_index];            avctx = st->internal->avctx;            if (!has_codec_parameters(st, NULL)) {                const AVCodec *codec = find_decoder(ic, st, st->codecpar->codec_id);                if (codec && !avctx->codec) {                    if (avcodec_open2(avctx, codec, (options && stream_index < orig_nb_streams) ? &options[stream_index] : NULL) < 0)                        av_log(ic, AV_LOG_WARNING,                            "Failed to open codec in av_find_stream_info\n");                }            }            // EOF already reached while reading the stream above.            // So continue with reoordering DTS with whatever delay we have.            if (ic->internal->packet_buffer && !has_decode_delay_been_guessed(st)) {                update_dts_from_pts(ic, stream_index, ic->internal->packet_buffer);            }        }    }    if (flush_codecs) {        AVPacket empty_pkt = { 0 };        int err = 0;        av_init_packet(&empty_pkt);        for (i = 0; i < ic->nb_streams; i++) {            st = ic->streams[i];            /* flush the decoders */            if (st->info->found_decoder == 1) {                do {                    err = try_decode_frame(ic, st, &empty_pkt,                                            (options && i < orig_nb_streams)                                            ? 
&options[i] : NULL);                } while (err > 0 && !has_codec_parameters(st, NULL));                if (err < 0) {                    av_log(ic, AV_LOG_INFO,                        "decoding for stream %d failed\n", st->index);                }            }        }    }    // close codecs which were opened in try_decode_frame()    for (i = 0; i < ic->nb_streams; i++) {        st = ic->streams[i];        avcodec_close(st->internal->avctx);    }    ff_rfps_calculate(ic);    for (i = 0; i < ic->nb_streams; i++) {        st = ic->streams[i];        avctx = st->internal->avctx;        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {            if (avctx->codec_id == AV_CODEC_ID_RAWVIDEO && !avctx->codec_tag && !avctx->bits_per_coded_sample) {                uint32_t tag= avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);                if (avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), tag) == avctx->pix_fmt)                    avctx->codec_tag= tag;            }            /* estimate average framerate if not set by demuxer */            if (st->info->codec_info_duration_fields &&                !st->avg_frame_rate.num &&                st->info->codec_info_duration) {                int best_fps      = 0;                double best_error = 0.01;                if (st->info->codec_info_duration        >= INT64_MAX / st->time_base.num / 2||                    st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||                    st->info->codec_info_duration        < 0)                    continue;                av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,                          st->info->codec_info_duration_fields * (int64_t) st->time_base.den,                          st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000);                /* Round guessed framerate to a "standard" framerate if it's                 * within 1% of the original estimate. 
*/                for (j = 0; j < MAX_STD_TIMEBASES; j++) {                    AVRational std_fps = { get_std_framerate(j), 12 * 1001 };                    double error       = fabs(av_q2d(st->avg_frame_rate) /                                              av_q2d(std_fps) - 1);                    if (error < best_error) {                        best_error = error;                        best_fps   = std_fps.num;                    }                }                if (best_fps)                    av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,                              best_fps, 12 * 1001, INT_MAX);            }            if (!st->r_frame_rate.num) {                if (    avctx->time_base.den * (int64_t) st->time_base.num                    <= avctx->time_base.num * avctx->ticks_per_frame * (int64_t) st->time_base.den) {                    st->r_frame_rate.num = avctx->time_base.den;                    st->r_frame_rate.den = avctx->time_base.num * avctx->ticks_per_frame;                } else {                    st->r_frame_rate.num = st->time_base.den;                    st->r_frame_rate.den = st->time_base.num;                }            }            if (st->display_aspect_ratio.num && st->display_aspect_ratio.den) {                AVRational hw_ratio = { avctx->height, avctx->width };                st->sample_aspect_ratio = av_mul_q(st->display_aspect_ratio,                                                   hw_ratio);            }        } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {            if (!avctx->bits_per_coded_sample)                avctx->bits_per_coded_sample =                    av_get_bits_per_sample(avctx->codec_id);            // set stream disposition based on audio service type            switch (avctx->audio_service_type) {            case AV_AUDIO_SERVICE_TYPE_EFFECTS:                st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;                break;            case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:                st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;                break;            case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:                st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;                break;            case AV_AUDIO_SERVICE_TYPE_COMMENTARY:                st->disposition = AV_DISPOSITION_COMMENT;                break;            case AV_AUDIO_SERVICE_TYPE_KARAOKE:                st->disposition = AV_DISPOSITION_KARAOKE;                break;            }        }    }    if (probesize)        estimate_timings(ic, old_offset);    av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN);    if (ret >= 0 && ic->nb_streams)        /* We could not have all the codec parameters before EOF. 
*/        ret = -1;    for (i = 0; i < ic->nb_streams; i++) {        const char *errmsg;        st = ic->streams[i];        /* if no packet was ever seen, update context now for has_codec_parameters */        if (!st->internal->avctx_inited) {            if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&                st->codecpar->format == AV_SAMPLE_FMT_NONE)                st->codecpar->format = st->internal->avctx->sample_fmt;            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);            if (ret < 0)                goto find_stream_info_err;        }        if (!has_codec_parameters(st, &errmsg)) {            char buf[256];            avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);            av_log(ic, AV_LOG_WARNING,                   "Could not find codec parameters for stream %d (%s): %s\n"                   "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",                   i, buf, errmsg);        } else {            ret = 0;        }    }    compute_chapters_end(ic);    /* update the stream parameters from the internal codec contexts */    for (i = 0; i < ic->nb_streams; i++) {        st = ic->streams[i];        if (st->internal->avctx_inited) {            int orig_w = st->codecpar->width;            int orig_h = st->codecpar->height;            ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx);            if (ret < 0)                goto find_stream_info_err;            // The decoder might reduce the video size by the lowres factor.            if (av_codec_get_lowres(st->internal->avctx) && orig_w) {                st->codecpar->width = orig_w;                st->codecpar->height = orig_h;            }        }#if FF_API_LAVF_AVCTXFF_DISABLE_DEPRECATION_WARNINGS        ret = avcodec_parameters_to_context(st->codec, st->codecpar);        if (ret < 0)            goto find_stream_info_err;        // The old API (AVStream.codec) "requires" the resolution to be adjusted        // by the lowres factor.        
if (av_codec_get_lowres(st->internal->avctx) && st->internal->avctx->width) {            av_codec_set_lowres(st->codec, av_codec_get_lowres(st->internal->avctx));            st->codec->width = st->internal->avctx->width;            st->codec->height = st->internal->avctx->height;        }        if (st->codec->codec_tag != MKTAG('t','m','c','d'))            st->codec->time_base = st->internal->avctx->time_base;        st->codec->framerate = st->avg_frame_rate;        if (st->internal->avctx->subtitle_header) {            st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);            if (!st->codec->subtitle_header)                goto find_stream_info_err;            st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;            memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,                   st->codec->subtitle_header_size);        }        // Fields unavailable in AVCodecParameters        st->codec->coded_width = st->internal->avctx->coded_width;        st->codec->coded_height = st->internal->avctx->coded_height;        st->codec->properties = st->internal->avctx->properties;FF_ENABLE_DEPRECATION_WARNINGS#endif        st->internal->avctx_inited = 0;    }find_stream_info_err:    for (i = 0; i < ic->nb_streams; i++) {        st = ic->streams[i];        if (st->info)            av_freep(&st->info->duration_error);        av_freep(&ic->streams[i]->info);    }    if (ic->pb)        av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",               avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);    return ret;}

ff_read_packet

int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, err;
    AVStream *st;

    for (;;) {
        // Look in this list first.
        AVPacketList *pktl = s->internal->raw_packet_buffer;

        // If the list already has packets, take one from it; on the first
        // call the list is empty.
        if (pktl) {
            *pkt = pktl->pkt;
            st   = s->streams[pkt->stream_index];
            // The default budget is RAW_PACKET_BUFFER_SIZE = 2500000 bytes.
            if (s->internal->raw_packet_buffer_remaining_size <= 0)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (st->request_probe <= 0) {
                s->internal->raw_packet_buffer                 = pktl->next;
                s->internal->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }

        pkt->data = NULL;
        pkt->size = 0;
        av_init_packet(pkt);
        // For a raw H.264 stream this calls ff_raw_read_partial_packet()
        // in libavformat/rawdec.c.
        ret = s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            if (ret == FFERROR_REDO)
                continue;
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->probe_packets)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                av_assert0(st->request_probe <= 0);
            }
            continue;
        }

        if (!pkt->buf) {
            AVPacket tmp = { 0 };
            ret = av_packet_ref(&tmp, pkt);
            if (ret < 0)
                return ret;
            *pkt = tmp;
        }

        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",
                   pkt->stream_index);
            av_packet_unref(pkt);
            continue;
        }

        if (pkt->stream_index >= (unsigned)s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
            continue;
        }

        st = s->streams[pkt->stream_index];

        if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
            // correct first time stamps to negative values
            if (!is_relative(st->first_dts))
                st->first_dts = wrap_timestamp(st, st->first_dts);
            if (!is_relative(st->start_time))
                st->start_time = wrap_timestamp(st, st->start_time);
            if (!is_relative(st->cur_dts))
                st->cur_dts = wrap_timestamp(st, st->cur_dts);
        }

        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        if (!pktl && st->request_probe <= 0)
            return ret;

        err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
                            &s->internal->raw_packet_buffer_end, 0);
        if (err)
            return err;
        s->internal->raw_packet_buffer_remaining_size -= pkt->size;

        if ((err = probe_codec(s, st, pkt)) < 0)
            return err;
    }
}

ff_raw_read_partial_packet

int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size;

    size = RAW_PACKET_SIZE;

    // Allocate the memory pkt needs; explained in detail below.
    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);

    pkt->pos = avio_tell(s->pb);
    pkt->stream_index = 0;
    // Call the read function registered in s->pb to read size bytes into
    // pkt->data; explained in detail below.
    ret = ffio_read_partial(s->pb, pkt->data, size);
    if (ret < 0) {
        // pkt is reference counted; on failure release it with this function.
        av_packet_unref(pkt);
        return ret;
    }
    // Use the returned ret as the actual data size of pkt and zero the
    // trailing AV_INPUT_BUFFER_PADDING_SIZE bytes.
    av_shrink_packet(pkt, ret);
    return ret;
}

av_new_packet

Allocates the actual data buffer for an AVPacket.

int av_new_packet(AVPacket *pkt, int size)
{
    AVBufferRef *buf = NULL;
    // packet_alloc() performs several allocations:
    /*
     * AVBufferRef *buf               (malloc)
     * AVBuffer    *buf->buffer       (malloc)
     * uint8_t     *buf->buffer->data (malloc)
     * buf->data = buf->buffer->data
     * buf->size = size of buf->data
     * buf->buffer->refcount = 1      (reference count)
     */
    int ret = packet_alloc(&buf, size);
    if (ret < 0)
        return ret;

    // Finally hook all of the allocated memory up to pkt.
    av_init_packet(pkt);
    pkt->buf      = buf;
    pkt->data     = buf->data;
    pkt->size     = size;

    return 0;
}

struct AVBuffer {
    uint8_t *data; /**< data described by this buffer */
    int      size; /**< size of data in bytes */

    /**
     *  number of existing AVBufferRef instances referring to this buffer
     */
    volatile int refcount;

    /**
     * a callback for freeing the data
     */
    void (*free)(void *opaque, uint8_t *data);

    /**
     * an opaque pointer, to be used by the freeing callback
     */
    void *opaque;

    /**
     * A combination of BUFFER_FLAG_*
     */
    int flags;
};

/**
 * A reference to a data buffer.
 *
 * The size of this struct is not a part of the public ABI and it is not meant
 * to be allocated directly.
 */
typedef struct AVBufferRef {
    AVBuffer *buffer;

    /**
     * The data buffer. It is considered writable if and only if
     * this is the only reference to the buffer, in which case
     * av_buffer_is_writable() returns 1.
     */
    uint8_t *data;

    /**
     * Size of data in bytes.
     */
    int      size;
} AVBufferRef;
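The reference counting matters for the buffer flow described above: when a packet is appended to one of the AVPacketList queues with av_packet_ref(), both references point at the same AVBuffer, and the data is only freed once the last reference is dropped. A small sketch using the public API:

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVPacket a, b;

    av_init_packet(&a);
    av_init_packet(&b);

    if (av_new_packet(&a, 1024) < 0)   /* allocates a refcounted AVBuffer */
        return 1;

    if (av_packet_ref(&b, &a) < 0)     /* b shares a's buffer, refcount -> 2 */
        return 1;

    printf("same buffer: %s\n", a.data == b.data ? "yes" : "no");

    av_packet_unref(&a);               /* refcount 2 -> 1, data still valid via b */
    av_packet_unref(&b);               /* refcount 1 -> 0, buffer is freed */
    return 0;
}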

avio_tell

/**
 * ftell() equivalent for AVIOContext.
 * @return position or AVERROR.
 */
static av_always_inline int64_t avio_tell(AVIOContext *s)
{
    return avio_seek(s, 0, SEEK_CUR);
}

ffio_read_partial

In the end this still calls the read function registered in s. For reading from memory, the registration looks like this:

AVIOContext * inpb = avio_alloc_context(Buf, BUF_SIZE, 0, dataFromUser, read_data, NULL, NULL);

s->read_packet(s->opaque, buf, size); therefore ends up calling the read_data function registered above; a complete registration sketch follows.
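For completeness, this is roughly what such a memory-backed AVIOContext looks like end to end. It is a sketch only: Buf, BUF_SIZE, dataFromUser and read_data come from the line above, while MemReader and make_memory_pb are illustrative names, not part of FFmpeg.

#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

#define BUF_SIZE 4096

/* User-side state handed to the callback through the opaque pointer. */
typedef struct MemReader {
    const uint8_t *data;
    size_t         size;
    size_t         pos;
} MemReader;

/* Called by fill_buffer()/ffio_read_partial() whenever libavformat needs
 * more bytes; copies at most buf_size bytes and returns how many it copied. */
static int read_data(void *opaque, uint8_t *buf, int buf_size)
{
    MemReader *r = opaque;
    size_t left = r->size - r->pos;
    size_t n = left < (size_t)buf_size ? left : (size_t)buf_size;

    if (n == 0)
        return AVERROR_EOF;
    memcpy(buf, r->data + r->pos, n);
    r->pos += n;
    return (int)n;
}

AVIOContext *make_memory_pb(MemReader *dataFromUser)
{
    /* The scratch buffer must come from av_malloc(); libavformat owns it. */
    unsigned char *Buf = av_malloc(BUF_SIZE);
    if (!Buf)
        return NULL;
    /* write_flag = 0: read-only context, read_data is the read callback. */
    return avio_alloc_context(Buf, BUF_SIZE, 0, dataFromUser,
                              read_data, NULL, NULL);
}

The caller then assigns the returned context to AVFormatContext->pb before calling avformat_open_input(), so that every read goes through read_data.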

int ffio_read_partial(AVIOContext *s, unsigned char *buf, int size)
{
    int len;

    if (size < 0)
        return -1;

    if (s->read_packet && s->write_flag) {
        len = s->read_packet(s->opaque, buf, size);
        if (len > 0)
            s->pos += len;
        return len;
    }

    // If s's buffer is empty, fill_buffer() reads more data and stores it in
    // s's buffer; this is covered in detail in "Data flow inside
    // av_probe_input_buffer".
    len = s->buf_end - s->buf_ptr;
    if (len == 0) {
        /* Reset the buf_end pointer to the start of the buffer, to make sure
         * the fill_buffer call tries to read as much data as fits into the
         * full buffer, instead of just what space is left after buf_end.
         * This avoids returning partial packets at the end of the buffer,
         * for packet based inputs.
         */
        s->buf_end = s->buf_ptr = s->buffer;
        fill_buffer(s);
        len = s->buf_end - s->buf_ptr;
    }

    // Copy the data out and return.
    if (len > size)
        len = size;
    memcpy(buf, s->buf_ptr, len);
    s->buf_ptr += len;
    if (!len) {
        if (s->error)      return s->error;
        if (avio_feof(s))  return AVERROR_EOF;
    }
    return len;
}

av_shrink_packet

If fewer bytes were read than were allocated, the size stored in pkt has to be reset, and the AV_INPUT_BUFFER_PADDING_SIZE bytes immediately after the real data are set to 0, so that parsers reading slightly past the end do not run into uninitialized memory.

void av_shrink_packet(AVPacket *pkt, int size)
{
    if (pkt->size <= size)
        return;
    pkt->size = size;
    memset(pkt->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
}
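A small self-check sketch of this behavior, using only the public API, assuming the packet was allocated for a full RAW_PACKET_SIZE-style read but only 100 bytes actually arrived:

#include <assert.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVPacket pkt;

    av_init_packet(&pkt);
    if (av_new_packet(&pkt, 4096) < 0)   /* room for a full read */
        return 1;

    /* Pretend the read callback only delivered 100 bytes. */
    av_shrink_packet(&pkt, 100);

    assert(pkt.size == 100);
    /* The padding right after the real data is zeroed, so bitstream parsers
     * that over-read a little stay well defined. */
    for (int i = 0; i < AV_INPUT_BUFFER_PADDING_SIZE; i++)
        assert(pkt.data[100 + i] == 0);

    av_packet_unref(&pkt);
    return 0;
}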

parse_packet

The data is fed to av_parser_parse2() for parsing; whenever the parser produces a complete packet, that packet is appended to the s->internal->parse_queue list. av_parser_parse2() keeps its own internal buffer for data that was fed in earlier but does not yet form a complete packet. (A standalone usage sketch of av_parser_parse2() follows the source below.)

/**
 * Parse a packet, add all split parts to parse_queue.
 *
 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size      = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

    if (!pkt) {
        av_init_packet(&flush_pkt);
        pkt        = &flush_pkt;
        got_output = 1;
    } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, st->parser, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
    }

    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        int len;
        int64_t next_pts = pkt->pts;
        int64_t next_dts = pkt->dts;

        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->internal->avctx,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        data += len;
        size -= len;

        got_output = !!out_pkt.size;

        if (!out_pkt.size)
            continue;

        if (pkt->side_data) {
            out_pkt.side_data       = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data          = NULL;
            pkt->side_data_elems    = 0;
        }

        /* set the duration */
        out_pkt.duration = (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->internal->avctx->sample_rate > 0) {
                out_pkt.duration =
                    av_rescale_q_rnd(st->parser->duration,
                                     (AVRational) { 1, st->internal->avctx->sample_rate },
                                     st->time_base,
                                     AV_ROUND_DOWN);
            }
        }

        out_pkt.stream_index = st->index;
        out_pkt.pts          = st->parser->pts;
        out_pkt.dts          = st->parser->dts;
        out_pkt.pos          = st->parser->pos;

        if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
            out_pkt.pos = st->parser->frame_offset;

        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        if (st->parser->key_frame == -1 && st->parser->pict_type == AV_PICTURE_TYPE_NONE && (pkt->flags & AV_PKT_FLAG_KEY))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt, next_dts, next_pts);

        ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
                            &s->internal->parse_queue_end, 1);
        av_packet_unref(&out_pkt);
        if (ret < 0)
            goto fail;
    }

    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
        st->parser = NULL;
    }

fail:
    av_packet_unref(pkt);
    return ret;
}
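parse_packet() itself is internal, but the parser it drives is public API. A minimal standalone sketch of the same splitting step on a raw H.264 elementary stream (FFmpeg 3.x-era API; the flush call at EOF is omitted for brevity and /tmp/input.h264 is just an example path):

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVCodec *codec;
    AVCodecContext *ctx;
    AVCodecParserContext *parser;
    FILE *f;
    uint8_t inbuf[4096];

    avcodec_register_all();                  /* still required on FFmpeg 3.x */
    codec  = avcodec_find_decoder(AV_CODEC_ID_H264);
    ctx    = avcodec_alloc_context3(codec);
    parser = av_parser_init(AV_CODEC_ID_H264);
    f      = fopen("/tmp/input.h264", "rb");
    if (!codec || !ctx || !parser || !f || avcodec_open2(ctx, codec, NULL) < 0)
        return 1;

    for (;;) {
        int size = (int)fread(inbuf, 1, sizeof(inbuf), f);
        uint8_t *data = inbuf;
        if (size <= 0)
            break;
        /* The parser buffers partial frames internally and hands back one
         * complete packet at a time, exactly as parse_packet() relies on. */
        while (size > 0) {
            uint8_t *out_data = NULL;
            int out_size = 0;
            int len = av_parser_parse2(parser, ctx, &out_data, &out_size,
                                       data, size,
                                       AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            data += len;
            size -= len;
            if (out_size > 0)
                printf("parsed one packet of %d bytes\n", out_size);
        }
    }

    fclose(f);
    av_parser_close(parser);
    avcodec_free_context(&ctx);
    return 0;
}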