FFmpeg source-tracing notes: avformat_open_input


1. Function call graph

(Figure: call graph of avformat_open_input and the helpers it invokes — not reproduced here)


2. Annotated source code

int avformat_open_input(AVFormatContext **ps, const char *filename,
                        AVInputFormat *fmt, AVDictionary **options)
{
    AVFormatContext *s = *ps;
    int ret = 0;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    // Allocate the context if the caller did not, set default values, allocate the internal struct
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    if (!s->av_class) {
        av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);
    }
    if (fmt) // if an AVInputFormat was specified, use it
        s->iformat = fmt;

    if (options) // make a copy of the caller's options
        av_dict_copy(&tmp, *options, 0);

    // Apply every option that exists in AVFormatContext's AVClass; the full list of
    // AVFormatContext options is avformat_options in options_table.h
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    // Open input file and probe the format if necessary
    if ((ret = init_input(s, filename, &tmp)) < 0)
        goto fail;
    s->probe_score = ret;

    if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
        av_log(s, AV_LOG_ERROR, "Format not on whitelist\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    avio_skip(s->pb, s->skip_initial_bytes);

    /* Check filename in case an image number is expected. */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    }

    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));

    // Allocate priv_data and fill it from the caller's options; priv_data mostly exists
    // to pass parameters to the AVInputFormat's read_header function.
    // Which options a given AVInputFormat supports depends on its definition; for raw YUV,
    // see rawvideo_options in rawvideodec.c
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->iformat->priv_class) {
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    if (s->pb) // haven't yet looked into why there is always ID3v2-related code here
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, 0);

    // If the demuxer provides a read_header function pointer, call it; it essentially sets
    // some initial values. For raw YUV that means width, height and pixel format, which
    // arrive through the AVFormatContext's priv_data
    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
            goto fail;

    if (id3v2_extra_meta) {
        if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
            !strcmp(s->iformat->name, "tta")) {
            if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
                goto fail;
        } else
            av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
    }
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);

    if ((ret = avformat_queue_attached_pictures(s)) < 0)
        goto fail;

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->internal->data_offset)
        s->internal->data_offset = avio_tell(s->pb);

    s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    *ps = s;
    return 0;

fail:
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    av_dict_free(&tmp);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_closep(&s->pb);
    avformat_free_context(s);
    *ps = NULL;
    return ret;
}

3. Brief explanation

There are really only five key points in this function.
Point 1: avformat_alloc_context allocates the AVFormatContext and sets its default values.
Point 2: av_opt_set_dict applies the input options to the AVFormatContext.
Point 3: init_input opens the file and probes the input format.
Point 4: priv_data is allocated and then filled from the input options via av_opt_set_dict.
Point 5: read_header initializes the AVInputFormat and creates the new stream(s).
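One detail worth noticing at the end of the source in section 2: every option that some AVClass consumed is removed from the dictionary, and whatever remains is handed back to the caller through *options. Below is a minimal sketch of my own (not FFmpeg code; the bogus option name and the URL are made up, and the usual library initialization for your FFmpeg version is assumed) showing how a caller can detect options that nothing recognized:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_option_check(const char *url)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionary *opts = NULL;
    const AVDictionaryEntry *e = NULL;
    int ret;

    /* "probesize" is a real AVFormatContext option; "no_such_opt" is deliberately bogus */
    av_dict_set(&opts, "probesize", "5000000", 0);
    av_dict_set(&opts, "no_such_opt", "42", 0);

    ret = avformat_open_input(&fmt_ctx, url, NULL, &opts);

    /* entries still in opts were recognized by no AVClass (neither the
     * AVFormatContext nor the demuxer's priv_data) */
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        av_log(NULL, AV_LOG_WARNING, "Option '%s' was not consumed\n", e->key);

    av_dict_free(&opts);
    if (ret >= 0)
        avformat_close_input(&fmt_ctx);
    return ret;
}

The ffmpeg command-line tool performs a similar leftover-option check after opening each input.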

4. How av_opt_set_dict sets values

As the function call graph above shows, av_opt_set_dict ultimately sets each value through av_opt_set.
Here is the code of av_opt_set:
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
{
    int ret = 0;
    void *dst, *target_obj;
    const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
    if (!o || !target_obj)
        return AVERROR_OPTION_NOT_FOUND;

    if (!val && (o->type != AV_OPT_TYPE_STRING &&
                 o->type != AV_OPT_TYPE_PIXEL_FMT && o->type != AV_OPT_TYPE_SAMPLE_FMT &&
                 o->type != AV_OPT_TYPE_IMAGE_SIZE && o->type != AV_OPT_TYPE_VIDEO_RATE &&
                 o->type != AV_OPT_TYPE_DURATION && o->type != AV_OPT_TYPE_COLOR &&
                 o->type != AV_OPT_TYPE_CHANNEL_LAYOUT))
        return AVERROR(EINVAL);

    if (o->flags & AV_OPT_FLAG_READONLY)
        return AVERROR(EINVAL);

    dst = ((uint8_t*)target_obj) + o->offset;
    switch (o->type) {
    case AV_OPT_TYPE_STRING:   return set_string(obj, o, val, dst);
    case AV_OPT_TYPE_BINARY:   return set_string_binary(obj, o, val, dst);
    case AV_OPT_TYPE_FLAGS:
    case AV_OPT_TYPE_INT:
    case AV_OPT_TYPE_INT64:
    case AV_OPT_TYPE_FLOAT:
    case AV_OPT_TYPE_DOUBLE:
    case AV_OPT_TYPE_RATIONAL: return set_string_number(obj, target_obj, o, val, dst);
    case AV_OPT_TYPE_IMAGE_SIZE: return set_string_image_size(obj, o, val, dst);
    case AV_OPT_TYPE_VIDEO_RATE: return set_string_video_rate(obj, o, val, dst);
    case AV_OPT_TYPE_PIXEL_FMT:  return set_string_pixel_fmt(obj, o, val, dst);
    case AV_OPT_TYPE_SAMPLE_FMT: return set_string_sample_fmt(obj, o, val, dst);
    case AV_OPT_TYPE_DURATION:
        if (!val) {
            *(int64_t *)dst = 0;
            return 0;
        } else {
            if ((ret = av_parse_time(dst, val, 1)) < 0)
                av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", val);
            return ret;
        }
        break;
    case AV_OPT_TYPE_COLOR:      return set_string_color(obj, o, val, dst);
    case AV_OPT_TYPE_CHANNEL_LAYOUT:
        if (!val || !strcmp(val, "none")) {
            *(int64_t *)dst = 0;
        } else {
#if FF_API_GET_CHANNEL_LAYOUT_COMPAT
            int64_t cl = ff_get_channel_layout(val, 0);
#else
            int64_t cl = av_get_channel_layout(val);
#endif
            if (!cl) {
                av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as channel layout\n", val);
                ret = AVERROR(EINVAL);
            }
            *(int64_t *)dst = cl;
            return ret;
        }
        break;
    }

    av_log(obj, AV_LOG_ERROR, "Invalid option type.\n");
    return AVERROR(EINVAL);
}
The rough flow of the code: av_opt_find2 first looks up the AVOption that matches the given name, together with a pointer to the target object (target_obj). Which field gets written is determined by the option's offset: dst = ((uint8_t*)target_obj) + o->offset. Then, depending on o->type, a different setter function is called, which is how the chosen field of the target object ends up with the value.
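The offset arithmetic is the heart of the mechanism: each AVOption records the byte offset of the field it controls, so one generic function can write into any struct. The following standalone sketch (plain C with an invented struct and option table, not FFmpeg code) shows the same idea in miniature:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* invented mini "context" and option table, only to illustrate the offset trick */
typedef struct Demo {
    int    width;
    int    height;
    double rate;
} Demo;

typedef struct Opt {
    const char *name;
    size_t      offset;  /* byte offset of the field inside Demo */
    char        type;    /* 'i' = int, 'd' = double */
} Opt;

static const Opt opts[] = {
    { "width",  offsetof(Demo, width),  'i' },
    { "height", offsetof(Demo, height), 'i' },
    { "rate",   offsetof(Demo, rate),   'd' },
};

/* same shape as av_opt_set: find the option, compute dst, dispatch on type */
static int demo_set(Demo *obj, const char *name, const char *val)
{
    for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
        if (strcmp(opts[i].name, name))
            continue;
        void *dst = (uint8_t *)obj + opts[i].offset;   /* dst = target_obj + offset */
        if (opts[i].type == 'i')
            *(int *)dst = atoi(val);
        else
            *(double *)dst = atof(val);
        return 0;
    }
    return -1;  /* the moral equivalent of AVERROR_OPTION_NOT_FOUND */
}

int main(void)
{
    Demo d = { 0 };
    demo_set(&d, "width",  "1280");
    demo_set(&d, "height", "720");
    demo_set(&d, "rate",   "25");
    printf("%dx%d @ %g fps\n", d.width, d.height, d.rate);   /* 1280x720 @ 25 fps */
    return 0;
}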
Back in FFmpeg, take a look at the raw-video (YUV) options:
static const AVOption rawvideo_options[] = {
    { "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "pixel_format", "set pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "yuv420p"}, 0, 0, DEC },
    { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
    { NULL },
};
If we pass in video_size 1280x720, av_opt_find2 returns &rawvideo_options[0]; since its type is AV_OPT_TYPE_IMAGE_SIZE, set_string_image_size is called to set the value. Here is set_string_image_size:
static int set_string_image_size(void *obj, const AVOption *o, const char *val, int *dst)
{
    int ret;

    if (!val || !strcmp(val, "none")) {
        dst[0] =
        dst[1] = 0;
        return 0;
    }
    ret = av_parse_video_size(dst, dst + 1, val);
    if (ret < 0)
        av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as image size\n", val);
    return ret;
}
And the av_parse_video_size function:
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
{
    int i;
    int n = FF_ARRAY_ELEMS(video_size_abbrs);
    const char *p;
    int width = 0, height = 0;

    for (i = 0; i < n; i++) {
        if (!strcmp(video_size_abbrs[i].abbr, str)) {
            width  = video_size_abbrs[i].width;
            height = video_size_abbrs[i].height;
            break;
        }
    }
    if (i == n) {
        width = strtol(str, (void*)&p, 10);
        if (*p)
            p++;
        height = strtol(p, (void*)&p, 10);

        /* trailing extraneous data detected, like in 123x345foobar */
        if (*p)
            return AVERROR(EINVAL);
    }
    if (width <= 0 || height <= 0)
        return AVERROR(EINVAL);
    *width_ptr  = width;
    *height_ptr = height;
    return 0;
}
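Since av_parse_video_size is public API (declared in libavutil/parseutils.h), its behaviour is easy to try in isolation; it accepts both an explicit WxH string and the abbreviations stored in video_size_abbrs. A small usage sketch:

#include <stdio.h>
#include <libavutil/parseutils.h>

int main(void)
{
    int w, h;

    if (av_parse_video_size(&w, &h, "1280x720") == 0)
        printf("1280x720 -> %d x %d\n", w, h);   /* explicit WxH form */

    if (av_parse_video_size(&w, &h, "vga") == 0)
        printf("vga      -> %d x %d\n", w, h);   /* abbreviation from video_size_abbrs (640x480) */

    /* trailing garbage is rejected, exactly as the code above shows */
    if (av_parse_video_size(&w, &h, "123x345foobar") < 0)
        printf("123x345foobar rejected\n");

    return 0;
}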
So where does the target object come from?
In the definition of the YUV demuxer:
AVInputFormat ff_rawvideo_demuxer = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .priv_data_size = sizeof(RawVideoDemuxerContext),
    .read_header    = rawvideo_read_header,
    .read_packet    = rawvideo_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "yuv,cif,qcif,rgb",
    .raw_codec_id   = AV_CODEC_ID_RAWVIDEO,
    .priv_class     = &rawvideo_demuxer_class,
};
Here we find a RawVideoDemuxerContext structure, defined as follows:
typedef struct RawVideoDemuxerContext {
    const AVClass *class;     /**< Class for private options. */
    int width, height;        /**< Integers describing video size, set by a private option. */
    char *pixel_format;       /**< Set by a private option. */
    AVRational framerate;     /**< AVRational describing framerate, set by a private option. */
} RawVideoDemuxerContext;
This structure holds exactly the input parameters the YUV demuxer needs. During avformat_open_input, priv_data is allocated to the size of this structure, and a pointer to it is what gets passed to av_opt_set_dict:
    // Allocate priv_data and fill it from the caller's options; priv_data mostly exists
    // to pass parameters to the AVInputFormat's read_header function.
    // Which options a given AVInputFormat supports depends on its definition; for raw YUV,
    // see rawvideo_options in rawvideodec.c
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->iformat->priv_class) {
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }
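The line *(const AVClass **) s->priv_data = s->iformat->priv_class only works because the AVClass pointer is the first member of the private context (the class field of RawVideoDemuxerContext). The same three steps can be reproduced with the public AVOption API; in the sketch below the struct, class and option table are invented for illustration and only mimic the rawvideo ones:

#include <stddef.h>
#include <stdio.h>
#include <libavutil/opt.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

/* invented private context; like RawVideoDemuxerContext, the AVClass
 * pointer must be the FIRST member */
typedef struct MyPrivContext {
    const AVClass *class;
    int width, height;        /* filled by the "video_size" option */
    char *pixel_format;       /* filled by the "pixel_format" option */
} MyPrivContext;

#define OFFSET(x) offsetof(MyPrivContext, x)

static const AVOption my_options[] = {
    { "video_size",   "set frame size",   OFFSET(width),
      AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL },      0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { "pixel_format", "set pixel format", OFFSET(pixel_format),
      AV_OPT_TYPE_STRING,     { .str = "yuv420p" }, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass my_class = {
    .class_name = "my_priv",
    .item_name  = av_default_item_name,
    .option     = my_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    MyPrivContext *priv = av_mallocz(sizeof(*priv));
    AVDictionary  *opts = NULL;

    av_dict_set(&opts, "video_size", "1280x720", 0);

    /* the same three steps avformat_open_input performs on priv_data */
    *(const AVClass **)priv = &my_class;   /* 1. first member points at the class     */
    av_opt_set_defaults(priv);             /* 2. apply defaults from the option table */
    av_opt_set_dict(priv, &opts);          /* 3. overwrite with the caller's dict     */

    printf("%dx%d, %s\n", priv->width, priv->height, priv->pixel_format); /* 1280x720, yuv420p */

    av_opt_free(priv);       /* frees the strdup'ed pixel_format default */
    av_free(priv);
    av_dict_free(&opts);
    return 0;
}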
That is how the options end up in priv_data and are handed to the AVInputFormat's read_header function.
Here is the YUV demuxer's rawvideo_read_header:
static int rawvideo_read_header(AVFormatContext *ctx)
{
    RawVideoDemuxerContext *s = ctx->priv_data;
    enum AVPixelFormat pix_fmt;
    AVStream *st;

    st = avformat_new_stream(ctx, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = ctx->iformat->raw_codec_id;

    if ((pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "No such pixel format: %s.\n",
               s->pixel_format);
        return AVERROR(EINVAL);
    }

    avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num);

    st->codec->width  = s->width;
    st->codec->height = s->height;
    st->codec->pix_fmt = pix_fmt;
    st->codec->bit_rate = av_rescale_q(avpicture_get_size(st->codec->pix_fmt, s->width, s->height),
                                       (AVRational){8,1}, st->time_base);

    return 0;
}
It simply casts the AVFormatContext's priv_data back to a RawVideoDemuxerContext and reads the parameters from there.
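To close the loop from the caller's side: the same option names travel through the AVDictionary into priv_data and finally into rawvideo_read_header. A hedged sketch of such a caller (the input path input.yuv is only a placeholder):

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main(void)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();  /* required on the FFmpeg version this post covers; removed in FFmpeg 4.0+ */

    /* these keys match rawvideo_options, so they end up in RawVideoDemuxerContext
     * and are read back by rawvideo_read_header */
    av_dict_set(&opts, "video_size",   "1280x720", 0);
    av_dict_set(&opts, "pixel_format", "yuv420p",  0);
    av_dict_set(&opts, "framerate",    "25",       0);

    /* "input.yuv" is a placeholder path; the demuxer is forced explicitly */
    ret = avformat_open_input(&fmt_ctx, "input.yuv",
                              av_find_input_format("rawvideo"), &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        fprintf(stderr, "avformat_open_input failed: %d\n", ret);
        return 1;
    }

    printf("demuxer: %s, streams: %u, probe_score: %d\n",
           fmt_ctx->iformat->name, fmt_ctx->nb_streams, fmt_ctx->probe_score);

    avformat_close_input(&fmt_ctx);
    return 0;
}

Forcing the demuxer via av_find_input_format avoids relying on extension-based probing, since a headerless raw .yuv file gives the prober nothing else to identify it by.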


