ijkplayer 0.6.0 "enable resolution change": a code study


Code location
https://github.com/Bilibili/ijkplayer/blob/e28708feb0ec96b7a701f965b8b43a8a15b1eec9/ijkmedia/ijkplayer/android/pipeline/ffpipenode_android_mediacodec_vdec.c#L371
Line numbers mentioned in this article refer to the line numbers on that GitHub page.

Changelog

  • 160802
    Document created.
    Search for these date codes to find the corresponding changes.
  • 160803
    Correction:
    ff_set_dimensions() inside avcodec_open2 is not executed, so it does not assign coded_width/coded_height, and therefore the lowres value is not involved either.
  • 160803.2
    Correction:
    Found where avctx->width/height gets modified, which makes the execution flow consistent.
  • 160804
    A git log that may be needed for later study:
    https://github.com/Bilibili/ijkplayer/commit/eda66e626699c28b17ee3d419b812cacd9d56d6a
    It concerns surface redrawing.

Very few people are discussing this new feature at the moment; if you are interested in exploring it, feel free to contact me by email.
The email address can be found on my GitHub page:
https://github.com/CzachQ

Key point 1 - Line 434

if (opaque->ffp->mediacodec_handle_resolution_change &&
    opaque->codecpar->codec_id == AV_CODEC_ID_H264)

This checks the local player setting and the codec ID of the encoded stream.

Codec IDs:
https://www.ffmpeg.org/doxygen/2.7/group__lavc__core.html#gaadca229ad2c20e060a14fec08a5cc7ce
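
The mediacodec_handle_resolution_change flag is driven by the player option mediacodec-handle-resolution-change (see the commit linked under Key point 2). Below is a hypothetical sketch of enabling it from native code; the helper ijkmp_set_option_int and the category constant IJKMP_OPT_CATEGORY_PLAYER are assumptions taken from ijkplayer's public header ijkplayer.h and should be verified there (on the Java side the equivalent is IjkMediaPlayer.setOption):

#include "ijkplayer.h"   // IjkMediaPlayer, ijkmp_set_option_int (assumed header path)

// Hypothetical sketch: enable the resolution-change path before preparing the player.
// "mediacodec-handle-resolution-change" ends up in ffp->mediacodec_handle_resolution_change,
// the flag tested in the if-statement above.
static void enable_mediacodec_resolution_change(IjkMediaPlayer *mp)
{
    ijkmp_set_option_int(mp, IJKMP_OPT_CATEGORY_PLAYER,
                         "mediacodec-handle-resolution-change", 1);
    // MediaCodec decoding itself must also be enabled (e.g. the "mediacodec" option)
    // for this decoder pipeline to be used at all.
}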

The opaque variable is defined as:

IJKFF_Pipenode_Opaque *opaque = node->opaque;

The IJKFF_Pipenode_Opaque struct is defined as:

typedef struct IJKFF_Pipenode_Opaque {
    FFPlayer                 *ffp;
    // FFPlayer: https://github.com/Bilibili/ijkplayer/blob/e28708feb0ec96b7a701f965b8b43a8a15b1eec9/ijkmedia/ijkplayer/ff_ffplay_def.h#L498
    IJKFF_Pipeline           *pipeline;
    Decoder                  *decoder;
    SDL_Vout                 *weak_vout;

    ijkmp_mediacodecinfo_context mcc;

    jobject                   jsurface;
    SDL_AMediaFormat         *input_aformat;
    SDL_AMediaCodec          *acodec;
    SDL_AMediaFormat         *output_aformat;
    char                      acodec_name[128];
    int                       frame_width;
    int                       frame_height;
    int                       frame_rotate_degrees;

    AVCodecContext           *avctx;     // not own
    AVCodecParameters        *codecpar;
    AVBitStreamFilterContext *bsfc;      // own
#if AMC_USE_AVBITSTREAM_FILTER
    uint8_t                  *orig_extradata;
    int                       orig_extradata_size;
#else
    size_t                    nal_size;
#endif

    SDL_Thread                _enqueue_thread;
    SDL_Thread               *enqueue_thread;

    SDL_mutex                *acodec_mutex;
    SDL_cond                 *acodec_cond;
    volatile bool             acodec_flush_request;
    volatile bool             acodec_reconfigure_request;

    SDL_mutex                *acodec_first_dequeue_output_mutex;
    SDL_cond                 *acodec_first_dequeue_output_cond;
    volatile bool             acodec_first_dequeue_output_request;

    bool                      aformat_need_recreate;

    SDL_mutex                *any_input_mutex;
    SDL_cond                 *any_input_cond;

    int                       input_packet_count;
    int                       input_error_count;
    int                       output_error_count;

    bool                      quirk_reconfigure_with_new_codec;

    int                       n_buf_out;
    AMC_Buf_Out              *amc_buf_out;
    int                       off_buf_out;
    double                    last_queued_pts;

    SDL_SpeedSampler          sampler;
} IJKFF_Pipenode_Opaque;

opaque->codecpar:

This struct describes the properties of an encoded stream.
http://ffmpeg.org/doxygen/3.1/structAVCodecParameters.html


Key point 2 - Line 440

if (size_data && size_data_size > AV_INPUT_BUFFER_PADDING_SIZE) 

size_data and size_data_size are defined as:

uint8_t *size_data = NULL;
int size_data_size = 0;
size_data = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &size_data_size);

/**
* The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
* that the extradata buffer was changed and the receiving side should
* act upon it appropriately. The new extradata is embedded in the side
* data buffer and should be immediately used for processing the current
* frame or packet.
*/

As I understand it, if size_data receives data, the extradata in the packet has been updated and the receiving side needs to update its own extradata; that is why the later code performs memcpy(new_avctx->extradata, size_data, size_data_size); to do exactly that.
extradata stores codec-specific special information; its type is uint8_t*.

Supplementary notes:

av_packet_get_side_data:
Get side information from packet. (an FFmpeg function)

AVPacket *avpkt = &d->pkt_temp;
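
As a self-contained illustration of this pattern (a minimal sketch of my own, not the ijkplayer code itself), the following assumes the FFmpeg 2.x/3.x signature of av_packet_get_side_data (int *size) and over-allocates by AV_INPUT_BUFFER_PADDING_SIZE as the extradata note quoted further below recommends:

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

// Minimal sketch: if the packet carries AV_PKT_DATA_NEW_EXTRADATA,
// replace the codec context's extradata with the new buffer.
static int update_extradata_from_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    int      new_size      = 0;
    uint8_t *new_extradata = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &new_size);

    if (!new_extradata || new_size <= 0)
        return 0;                               // no new extradata in this packet

    av_freep(&avctx->extradata);
    avctx->extradata = av_mallocz(new_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    memcpy(avctx->extradata, new_extradata, new_size);
    avctx->extradata_size = new_size;
    return 1;                                   // extradata was replaced
}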

+(160803)
https://github.com/Bilibili/ijkplayer/commit/5fc8be80d75af50ca9c630b46fd76d82ccffff6c
From this commit log we can see that the code under the check

if (opaque->codecpar->codec_id == AV_CODEC_ID_H264)
// At this point the option switch had not been added yet; it came in a later commit
// (add option mediacodec-handle-resolution-change):
// https://github.com/Bilibili/ijkplayer/commit/46eb7dec4f6c478ef81dfb73ed53aa411f834c76

was all added for the purpose of:

handle new extra data

This confirms that the earlier understanding was correct: the trigger condition for this block of code is that the extradata has been updated.

Note: a comment about extradata found in another article: http://blog.csdn.net/leixiaohua1020/article/details/14214859
The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than extradata_size to avoid problems if it is read with the
bitstream reader.
It covers, for example:
mpeg4: global headers (they can be in the bitstream or here)


Open question 1

The check in Key point 2 originates from an extradata update in the packet
node->opaque->ffp(FFPlayer)->is(VideoState)->d(Decoder)->pkt_temp(AVPacket),
and that update is assigned to new_avctx (AVCodecContext):

// Line 451-457
new_avctx->extradata = av_mallocz(size_data_size);
if (!new_avctx->extradata) {
    avcodec_free_context(&new_avctx);
    return AVERROR(ENOMEM);
}
memcpy(new_avctx->extradata, size_data, size_data_size);
new_avctx->extradata_size = size_data_size;

The other assignments to new_avctx, however, use the values from
node->opaque->codecpar (explained in detail in Key point 3):

// Line 449
avcodec_parameters_to_context(new_avctx, opaque->codecpar);
// int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
// Fill the codec context based on the values from the supplied codec parameters.
// code: http://ffmpeg.org/doxygen/3.1/libavcodec_2utils_8c_source.html#l04133

The extradata update in pkt_temp is the precondition for all of these operations.
So why do the subsequent resolution settings all use the data from codecpar?
Are these two data sources kept in sync?


Key point 3 - Line 449

avcodec_parameters_to_context(new_avctx, opaque->codecpar);

int avcodec_parameters_to_context(AVCodecContext * codec,const AVCodecParameters * par)
Fill the codec context based on the values from the supplied codec parameters.
code:http://ffmpeg.org/doxygen/3.1/libavcodec_2utils_8c_source.html#l04133
It contains (used later):
codec->width = par->width;
codec->height = par->height;


Key point 4 - Line 460

ret = avcodec_open2(new_avctx, codec, &codec_opts);

The variable codec is defined as:

(Line444)
const AVCodec *codec = opaque->decoder->avctx->codec;

About avcodec_open2:

Purpose: Initialize the AVCodecContext to use the given AVCodec.
CODE:
http://ffmpeg.org/doxygen/trunk/libavcodec_2utils_8c_source.html#l01208


avcodec_open2 contains the following code (L1221-1227 & L1333):

if ((codec && avctx->codec && codec != avctx->codec)) {
    av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
           "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
    return AVERROR(EINVAL);
}
if (!codec)
    codec = avctx->codec;
// ··· in between there are many allocations for avctx (avctx->internal) and assignments to some of its fields
avctx->codec = codec;
// avctx->internal : Private context used for internal data.
// Unlike priv_data, this is not codec-specific. It is used in general libavcodec functions.
* A small question here: I did not notice the intervening code making the two differ in any way, so why do these lines need two separate variables at all?


-(160803)
avcodec_open2 contains the following code (L1298-1301):
if (avctx->coded_width && avctx->coded_height)
ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
else if (avctx->width && avctx->height)
ret = ff_set_dimensions(avctx, avctx->width, avctx->height);

+(160803)
This code is behind a guard condition and will not execute!
// only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions


-(160803)
The variables coded_width / coded_height:
Bitstream width / height, may be different from width/height when the decoded frame is cropped before being output or lowres is enabled.

+(160803)
These two variables are not used.


int ff_set_dimensions(AVCodecContext *s, int width, int height)
{
    int ret = av_image_check_size(width, height, 0, s);
    if (ret < 0)
        width = height = 0;

    s->coded_width  = width;
    s->coded_height = height;
    s->width        = AV_CEIL_RSHIFT(width,  s->lowres);
    s->height       = AV_CEIL_RSHIFT(height, s->lowres);

    return ret;
}


int AVCodecContext::lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size


#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
                                                       : ((a) + (1<<(b)) - 1) >> (b))
Testing both expressions gives the same result, and both match the effect of the lowres variable described above; see the quick check below.
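
A quick standalone check of my own (assuming the compiler uses an arithmetic right shift for negative values, which is the case on common platforms) that the two branches compute the same ceiling division by 2^b, which is exactly the halving/quartering that lowres performs:

#include <stdio.h>

int main(void)
{
    for (int a = 1; a <= 1920; a++) {
        for (int b = 0; b <= 2; b++) {              // lowres = 0, 1, 2
            int x = -((-a) >> b);                   // shift-of-negative branch
            int y = (a + (1 << b) - 1) >> b;        // add-then-shift branch
            if (x != y) {
                printf("mismatch: a=%d b=%d -> %d vs %d\n", a, b, x, y);
                return 1;
            }
        }
    }
    // e.g. 1080 with lowres = 1 becomes ceil(1080 / 2) = 540
    printf("both forms agree; 1080 at lowres 1 -> %d\n", -((-1080) >> 1));
    return 0;
}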

-(160803)
Note:
At this point coded_width/coded_height in new_avctx hold the width/height taken from node->opaque->codecpar, while width/height hold the downscaled values. If lowres is 0 the two pairs are equal.


Key point 5 - Line 467

ret = avcodec_decode_video2(new_avctx, frame, &got_picture, avpkt);

Purpose: Decode the video frame of size avpkt->size from avpkt->data into picture.

My understanding:
After the packet is decoded into a frame here, the frame is freed at the end of this block,
so the purpose is not the decoding itself but confirming that the packet really contains picture data,
i.e. got_picture != 0 is what the subsequent code relies on.

+(160803.2)
Addendum:
This function also modifies new_avctx, with the data coming from avpkt,
so another purpose is to adjust the decoder settings according to the changes carried in the packet.
For details see Open question 2 - ANSWER.


Key point 6 - Line 473

if (got_picture) {
    if (opaque->codecpar->width  != new_avctx->width &&
        opaque->codecpar->height != new_avctx->height) {
        ALOGW("AV_PKT_DATA_NEW_EXTRADATA: %d x %d\n", new_avctx->width, new_avctx->height);
        avcodec_parameters_from_context(opaque->codecpar, new_avctx);
        opaque->aformat_need_recreate = true;
        ffpipeline_set_surface_need_reconfigure_l(pipeline, true);
    }
}
av_frame_unref(frame);
avcodec_free_context(&new_avctx);

The key lines are:

avcodec_parameters_from_context(opaque->codecpar, new_avctx);
opaque->aformat_need_recreate = true;
ffpipeline_set_surface_need_reconfigure_l(pipeline, true);

Analysis 1

avcodec_parameters_from_context(opaque->codecpar, new_avctx);

Fill the parameters struct based on the values from the supplied codec context.
http://ffmpeg.org/doxygen/trunk/libavcodec_2utils_8c_source.html#l04077
This line mirrors the code in Key point 3, but the assignment goes in exactly the opposite direction:
here new_avctx is used to populate opaque->codecpar.


Analysis 2

opaque->aformat_need_recreate = true;
ffpipeline_set_surface_need_reconfigure_l(pipeline, true);

These two lines set the two flags that request the reconfiguration steps to true.
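
For reference, a minimal sketch of what the setter presumably does (an assumption based on the is_surface_need_reconfigure field shown in IJKFF_Pipeline_Opaque under Key point 7; the real implementation lives in ffpipeline_android.c):

// Sketch only: record the "surface needs reconfigure" request on the pipeline.
// The "_l" suffix follows ijkplayer's convention for functions whose callers
// manage the surrounding locking.
void ffpipeline_set_surface_need_reconfigure_l(IJKFF_Pipeline *pipeline, bool need_reconfigure)
{
    pipeline->opaque->is_surface_need_reconfigure = need_reconfigure;
}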


Open question 2 (resolved)

Key point 6 contains the check:
if (opaque->codecpar->width  != new_avctx->width &&
    opaque->codecpar->height != new_avctx->height)

The assignments done by the code in Key point 3 include:
new_avctx->height = opaque->codecpar->height;
new_avctx->width  = opaque->codecpar->width;

-(160803)
If lowres in Key point 4 is 0, i.e. the source is not decoded at a reduced resolution,
the values of new_avctx->width/height do not change and still equal those in opaque->codecpar.

The if condition then evaluates to false and its body is not executed:
size_data had content at the start and new decoder data was obtained,
yet the reconfiguration flags never get set to true.

How can that be?


Open question 2 - ANSWER

+(160803.2)
When this line from Key point 5 is executed:

avcodec_decode_video2(new_avctx, frame, &got_picture, avpkt);

Looking at the source of avcodec_decode_video2
https://www.ffmpeg.org/doxygen/2.7/libavcodec_2utils_8c_source.html#l02352
we can see the following code (L2376):

apply_param_change(avctx, &tmp);  // AVPacket tmp = *avpkt;

In the source of apply_param_change
https://www.ffmpeg.org/doxygen/2.7/libavcodec_2utils_8c_source.html#l02237
we can see the following code (L2277):

if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
    if (size < 8)
        goto fail;
    avctx->width  = bytestream_get_le32(&data);
    avctx->height = bytestream_get_le32(&data);
    size -= 8;
    ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
    if (ret < 0)
        return ret;
}
// flags = bytestream_get_le32(&data);
// data  = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);

AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS is one of the values of enum AVSideDataParamChangeFlags:

enum AVSideDataParamChangeFlags {
    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT  = 0x0001,
    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
    AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE    = 0x0004,
    AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS     = 0x0008,
};

So the width and height in new_avctx are modified according to parameters carried in the packet,
and this change is then propagated to opaque->codecpar by the code in Key point 6.
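
For completeness, this is how the sending side of this mechanism looks in general: a hypothetical sketch (not ijkplayer code) of how a caller or demuxer that supports parameter changes could attach the AV_PKT_DATA_PARAM_CHANGE side data that apply_param_change() parses above. The 12-byte layout (le32 flags, le32 width, le32 height) follows the FFmpeg 2.7/3.x reading code shown:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

// Hypothetical sketch: attach a dimension-change notification to a packet.
static int attach_dimension_change(AVPacket *pkt, int new_width, int new_height)
{
    uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 12);
    if (!side)
        return AVERROR(ENOMEM);

    AV_WL32(side + 0, AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS);  // flags
    AV_WL32(side + 4, new_width);
    AV_WL32(side + 8, new_height);
    return 0;
}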


Key point 7 - Reconfiguring the decoder

Line 559-566: reconfigure the surface

if (ffpipeline_is_surface_need_reconfigure_l(pipeline)) {
    jobject new_surface = NULL;

    // request reconfigure before lock, or never get mutex
    ffpipeline_lock_surface(pipeline);
    ffpipeline_set_surface_need_reconfigure_l(pipeline, false);
    new_surface = ffpipeline_get_surface_as_global_ref_l(env, pipeline);
    ffpipeline_unlock_surface(pipeline);
    ···

IJKFF_Pipeline *pipeline = opaque->pipeline;

struct IJKFF_Pipeline {
    SDL_Class             *opaque_class;
    IJKFF_Pipeline_Opaque *opaque;
    void            (*func_destroy)            (IJKFF_Pipeline *pipeline);
    IJKFF_Pipenode *(*func_open_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
    SDL_Aout       *(*func_open_audio_output)  (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
};

typedef struct IJKFF_Pipeline_Opaque {
    FFPlayer      *ffp;
    SDL_mutex     *surface_mutex;
    jobject        jsurface;
    volatile bool  is_surface_need_reconfigure;
    bool         (*mediacodec_select_callback)(void *opaque, ijkmp_mediacodecinfo_context *mcc);
    void          *mediacodec_select_callback_opaque;
    SDL_Vout      *weak_vout;
    float          left_volume;
    float          right_volume;
} IJKFF_Pipeline_Opaque;

int ffpipeline_lock_surface(IJKFF_Pipeline* pipeline)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    return SDL_LockMutex(opaque->surface_mutex);
}

jobject ffpipeline_get_surface_as_global_ref_l(JNIEnv *env, IJKFF_Pipeline* pipeline)
{
    if (!check_ffpipeline(pipeline, __func__))
        return NULL;
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    if (!opaque->surface_mutex)
        return NULL;
    jobject global_ref = NULL;
    if (opaque->jsurface)
        global_ref = (*env)->NewGlobalRef(env, opaque->jsurface);
    return global_ref;
}

Line 574-576: recreate the format

if (opaque->aformat_need_recreate) {
    ALOGI("%s: recreate aformat\n", __func__);
    ret = recreate_format_l(env, node);
    if (ret) {
        ALOGE("amc: recreate_format_l failed\n");
        goto fail;
    }
    opaque->aformat_need_recreate = false;
}

recreate_format_l:
https://github.com/Bilibili/ijkplayer/blob/48cf5a2aa7b6c81bfd8ff4a4dc41e566befca689/ijkmedia/ijkplayer/android/pipeline/ffpipenode_android_mediacodec_vdec.c#L163

Key line:
opaque->input_aformat = SDL_AMediaFormatJava_createVideoFormat(env, opaque->mcc.mime_type,
                                                               opaque->codecpar->width,
                                                               opaque->codecpar->height);

Line 584-589: reconfigure the codec

opaque->acodec_reconfigure_request = true;
SDL_LockMutex(opaque->acodec_mutex);
ret = reconfigure_codec_l(env, node, new_surface);
opaque->acodec_reconfigure_request = false;
SDL_CondSignal(opaque->acodec_cond);
SDL_UnlockMutex(opaque->acodec_mutex);

reconfigure_codec_l contains the following key calls:
https://github.com/Bilibili/ijkplayer/blob/48cf5a2aa7b6c81bfd8ff4a4dc41e566befca689/ijkmedia/ijkplayer/android/pipeline/ffpipenode_android_mediacodec_vdec.c
(L266 ↓ )
opaque->jsurface = (*env)->NewGlobalRef(env, new_surface);
(L276 ↓ )
opaque->acodec = create_codec_l(env, node);
(L306 ↓ )
amc_ret = SDL_AMediaCodec_configure_surface(env, opaque->acodec, opaque->input_aformat, opaque->jsurface, NULL, 0);
(L313 ↓ )
amc_ret = SDL_AMediaCodec_start(opaque->acodec);
···
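
Putting the three steps together, here is my own condensed sketch of the reconfiguration sequence described above (Lines 559-589; error handling, NULL checks and the quirk paths of the real code are omitted):

// Sketch only: what the enqueue thread does once a resolution change was detected.
static int handle_reconfigure(JNIEnv *env, IJKFF_Pipenode *node)
{
    IJKFF_Pipenode_Opaque *opaque   = node->opaque;
    IJKFF_Pipeline        *pipeline = opaque->pipeline;
    jobject                new_surface = NULL;
    int                    ret = 0;

    if (!ffpipeline_is_surface_need_reconfigure_l(pipeline))
        return 0;

    // 1. fetch the (possibly new) surface and clear the request flag
    ffpipeline_lock_surface(pipeline);
    ffpipeline_set_surface_need_reconfigure_l(pipeline, false);
    new_surface = ffpipeline_get_surface_as_global_ref_l(env, pipeline);
    ffpipeline_unlock_surface(pipeline);

    // 2. rebuild the input MediaFormat with the updated codecpar width/height
    if (opaque->aformat_need_recreate) {
        ret = recreate_format_l(env, node);
        if (ret)
            return ret;
        opaque->aformat_need_recreate = false;
    }

    // 3. recreate, configure and start the codec on the new surface
    opaque->acodec_reconfigure_request = true;
    SDL_LockMutex(opaque->acodec_mutex);
    ret = reconfigure_codec_l(env, node, new_surface);
    opaque->acodec_reconfigure_request = false;
    SDL_CondSignal(opaque->acodec_cond);
    SDL_UnlockMutex(opaque->acodec_mutex);

    return ret;
}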


Key point 8 - Decoding

Line 625

input_buffer_index = SDL_AMediaCodec_dequeueInputBuffer(opaque->acodec, timeUs);

ssize_t SDL_AMediaCodec_dequeueInputBuffer(SDL_AMediaCodec* acodec, int64_t timeoutUs)
{
    assert(acodec->func_dequeueInputBuffer);
    return acodec->func_dequeueInputBuffer(acodec, timeoutUs);
}


*** Note: in the dummy codec the function pointer is assigned as
acodec->func_dequeueInputBuffer = SDL_AMediaCodecDummy_dequeueInputBuffer;


static ssize_t SDL_AMediaCodecDummy_dequeueInputBuffer(SDL_AMediaCodec* acodec, int64_t timeoutUs)
{
    DMY_TRACE("%s", __func__);
    return SDL_AMediaCodec_FakeFifo_dequeueInputBuffer(&acodec->opaque->dummy_fifo, timeoutUs);
}


ssize_t SDL_AMediaCodec_FakeFifo_dequeueInputBuffer(SDL_AMediaCodec_FakeFifo* fifo, int64_t timeoutUs)
{
    int ret_index = -1;

    if (fifo->should_abort)
        return SDL_AMEDIA_ERROR_UNKNOWN;

    SDL_LockMutex(fifo->mutex);
    if (!fifo->should_abort) {
        if (fifo->size >= FAKE_BUFFER_QUEUE_SIZE) {
            SDL_CondWaitTimeout(fifo->wakeup_enqueue_cond, fifo->mutex, timeoutUs / 1000);
        }
        if (fifo->size < FAKE_BUFFER_QUEUE_SIZE) {
            ret_index = fifo->end;
        }
    }
    SDL_UnlockMutex(fifo->mutex);

    if (fifo->should_abort)
        return -1;
    return ret_index;
}

Line 639

copy_size = SDL_AMediaCodec_writeInputData(opaque->acodec, input_buffer_index, d->pkt_temp.data, d->pkt_temp.size);

Line 647

time_stamp = d->pkt_temp.pts;

Line 656

amc_ret = SDL_AMediaCodec_queueInputBuffer(opaque->acodec, input_buffer_index, 0, copy_size, time_stamp, queue_flags);
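
Putting Lines 625, 639 and 656 together, a condensed sketch of my own of the input-feeding sequence (error/EAGAIN handling, flush flags and the pts rescaling to microseconds done by the real code are omitted; the sdl_amedia_status_t / SDL_AMEDIA_OK return convention is an assumption taken from ijksdl and should be checked against the actual headers):

// Sketch only: feed one AVPacket's bytes into the MediaCodec input queue.
static int feed_one_packet(IJKFF_Pipenode_Opaque *opaque, AVPacket *pkt, int64_t timeUs)
{
    ssize_t input_buffer_index = SDL_AMediaCodec_dequeueInputBuffer(opaque->acodec, timeUs);
    if (input_buffer_index < 0)
        return -1;                                  // no input buffer available within timeUs

    ssize_t copy_size = SDL_AMediaCodec_writeInputData(opaque->acodec, input_buffer_index,
                                                       pkt->data, pkt->size);

    int64_t time_stamp = pkt->pts;                  // the real code rescales this to microseconds

    sdl_amedia_status_t amc_ret = SDL_AMediaCodec_queueInputBuffer(opaque->acodec,
                                                                   input_buffer_index,
                                                                   0, copy_size,
                                                                   time_stamp, 0 /* queue_flags */);
    return (amc_ret == SDL_AMEDIA_OK) ? 0 : -1;
}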