[Android N] MediaRecorder series: the StagefrightRecorder TS-stream recording flow (Part 1)


MediaRecorder parameter setup

To record a TS stream, configure the MediaRecorder as follows:

```java
// Set the audio capture source
mRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
// Capture video from the camera
mRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
// Set the output container format to MPEG-2 TS.
// This must be set before the audio and video encoder formats.
mRecorder.setOutputFormat(OUTPUT_FORMAT_MPEG2TS);
// Set the audio encoder format. The TS path only accepts the AAC
// family (see setupMPEG2TSRecording() below), so AMR_NB would be
// rejected with ERROR_UNSUPPORTED.
mRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
// Set the video encoder format
mRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
// Set the output file path
mRecorder.setOutputFile(videoFile.getAbsolutePath());
// For multi-camera systems
mRecorder.setCamera();
mRecorder.setParameters();
```

Let's look at the video setup flow first.

setVideoSource

This function records which source mVideoSource refers to; the variable is used again later.

```cpp
status_t StagefrightRecorder::setVideoSource(video_source vs) {
    ALOGV("setVideoSource: %d", vs);
    if (vs < VIDEO_SOURCE_DEFAULT ||
        vs >= VIDEO_SOURCE_LIST_END) {
        ALOGE("Invalid video source: %d", vs);
        return BAD_VALUE;
    }

    if (vs == VIDEO_SOURCE_DEFAULT) {
        mVideoSource = VIDEO_SOURCE_CAMERA;
    } else {
        mVideoSource = vs;
    }

    return OK;
}
```
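For context, the range check above is against the video_source enum from mediarecorder.h (abbreviated here; values as of Android N):

```cpp
enum video_source {
    VIDEO_SOURCE_DEFAULT = 0,
    VIDEO_SOURCE_CAMERA  = 1,
    VIDEO_SOURCE_SURFACE = 2,

    VIDEO_SOURCE_LIST_END  // must be last, used for range checking
};
```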

setVideoEncoder

This sets which video encoder to use.

```cpp
status_t StagefrightRecorder::setVideoEncoder(video_encoder ve) {
    ALOGV("setVideoEncoder: %d", ve);
    if (ve < VIDEO_ENCODER_DEFAULT ||
        ve >= VIDEO_ENCODER_LIST_END) {
        ALOGE("Invalid video encoder: %d", ve);
        return BAD_VALUE;
    }

    mVideoEncoder = ve;

    return OK;
}
```
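Likewise, the check here is against the video_encoder enum (abbreviated from mediarecorder.h, my elisions); the TS path will later insist on VIDEO_ENCODER_H264:

```cpp
enum video_encoder {
    VIDEO_ENCODER_DEFAULT   = 0,
    VIDEO_ENCODER_H263      = 1,
    VIDEO_ENCODER_H264      = 2,
    VIDEO_ENCODER_MPEG_4_SP = 3,
    ....
    VIDEO_ENCODER_LIST_END  // must be last, used for range checking
};
```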

setOutputFormat

This sets which output format to use; mOutputFormat is used again later.

```cpp
status_t StagefrightRecorder::setOutputFormat(output_format of) {
    ALOGV("setOutputFormat: %d", of);
    if (of < OUTPUT_FORMAT_DEFAULT ||
        of >= OUTPUT_FORMAT_LIST_END) {
        ALOGE("Invalid output format: %d", of);
        return BAD_VALUE;
    }

    if (of == OUTPUT_FORMAT_DEFAULT) {
        mOutputFormat = OUTPUT_FORMAT_THREE_GPP;
    } else {
        mOutputFormat = of;
    }

    return OK;
}
```
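The OUTPUT_FORMAT_MPEG2TS constant the app passed down corresponds to the native output_format enum (abbreviated from mediarecorder.h, my elisions):

```cpp
enum output_format {
    OUTPUT_FORMAT_DEFAULT   = 0,
    OUTPUT_FORMAT_THREE_GPP = 1,
    OUTPUT_FORMAT_MPEG_4    = 2,
    ....
    OUTPUT_FORMAT_MPEG2TS   = 8,
    ....
    OUTPUT_FORMAT_LIST_END  // must be last, used for range checking
};
```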

prepare()

When the MediaRecorder executes prepare(), everything configured above comes into play.

```cpp
status_t StagefrightRecorder::prepare() {
    if (mVideoSource == VIDEO_SOURCE_SURFACE) {
        return prepareInternal();
    }
    return OK;
}

status_t StagefrightRecorder::prepareInternal() {
    ALOGV("prepare");
    if (mOutputFd < 0) {
        ALOGE("Output file descriptor is invalid");
        return INVALID_OPERATION;
    }

    // Get UID and PID here for permission checking
    mClientUid = IPCThreadState::self()->getCallingUid();
    mClientPid = IPCThreadState::self()->getCallingPid();

    status_t status = OK;

    switch (mOutputFormat) {
        ....
        case OUTPUT_FORMAT_MPEG2TS:
            status = setupMPEG2TSRecording();
            break;
        ....
    }

    ALOGV("Recording frameRate: %d captureFps: %f",
            mFrameRate, mCaptureFps);

    return status;
}
```
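Note that for a camera source, prepare() returns immediately: prepareInternal() only runs here when recording from a Surface. On the camera path it is deferred until start(). An abbreviated paraphrase of start() (AOSP N, my elisions; not verbatim):

```cpp
status_t StagefrightRecorder::start() {
    ALOGV("start");
    ....
    // Camera path: the writer/encoder pipeline is built now,
    // not during prepare()
    if (mVideoSource != VIDEO_SOURCE_SURFACE) {
        status_t status = prepareInternal();
        if (status != OK) {
            return status;
        }
    }
    ....
    // ... after which mWriter->start() kicks off the MPEG2TSWriter
}
```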

Honestly, the logic of this code reads a little oddly. Its main job boils down to giving writer->addSource() two sources: an audio source and a camera source.

```cpp
status_t StagefrightRecorder::setupMPEG2TSRecording() {
    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_MPEG2TS);

    // Create the TS muxer; mOutputFd is the file it writes to
    sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);

    // Audio setup
    if (mAudioSource != AUDIO_SOURCE_CNT) {
        if (mAudioEncoder != AUDIO_ENCODER_AAC &&
            mAudioEncoder != AUDIO_ENCODER_HE_AAC &&
            mAudioEncoder != AUDIO_ENCODER_AAC_ELD) {
            return ERROR_UNSUPPORTED;
        }

        // ① Set up the audio encoder. A TS stream needs the audio track;
        // this function creates the audio source and hands it to the writer
        status_t err = setupAudioEncoder(writer);

        if (err != OK) {
            return err;
        }
    }

    // Video setup
    if (mVideoSource < VIDEO_SOURCE_LIST_END) {
        if (mVideoEncoder != VIDEO_ENCODER_H264) {
            ALOGE("MPEG2TS recording only supports H.264 encoding!");
            return ERROR_UNSUPPORTED;
        }

        sp<MediaSource> mediaSource;
        // ② Set up the media source (the video source, i.e. the camera source)
        status_t err = setupMediaSource(&mediaSource);
        if (err != OK) {
            return err;
        }

        sp<MediaCodecSource> encoder;
        // ③ Set up the encoder for that media source, i.e. point the
        // MediaCodecSource at the camera source
        err = setupVideoEncoder(mediaSource, &encoder);
        if (err != OK) {
            return err;
        }

        // ④ Hand the encoder to the TS muxer
        writer->addSource(encoder);
        mVideoEncoderSource = encoder;
    }

    if (mMaxFileDurationUs != 0) {
        writer->setMaxFileDuration(mMaxFileDurationUs);
    }

    if (mMaxFileSizeBytes != 0) {
        writer->setMaxFileSize(mMaxFileSizeBytes);
    }

    mWriter = writer;

    return OK;
}
```
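This also explains the AAC and H.264 checks above: the TS muxer itself only knows how to packetize those two formats, and rejects anything else when a source is added. An abbreviated paraphrase of MPEG2TSWriter::addSource() (not verbatim):

```cpp
status_t MPEG2TSWriter::addSource(const sp<IMediaSource> &source) {
    CHECK(!mStarted);

    sp<MetaData> meta = source->getFormat();
    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    // Only AVC video and AAC audio can be packetized into the TS stream
    if (strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
            && strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
        return ERROR_UNSUPPORTED;
    }

    sp<SourceInfo> info = new SourceInfo(source);
    mSources.push(info);

    return OK;
}
```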

① status_t err = setupAudioEncoder(writer)

```cpp
status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
    status_t status = BAD_VALUE;
    if (OK != (status = checkAudioEncoderCapabilities())) {
        return status;
    }

    switch(mAudioEncoder) {
        case AUDIO_ENCODER_AMR_NB:
        case AUDIO_ENCODER_AMR_WB:
        case AUDIO_ENCODER_AAC:
        case AUDIO_ENCODER_HE_AAC:
        case AUDIO_ENCODER_AAC_ELD:
            break;

        default:
            ALOGE("Unsupported audio encoder: %d", mAudioEncoder);
            return UNKNOWN_ERROR;
    }

    // Create a MediaCodecSource (audioEncoder) for the addSource() below;
    // the MediaCodecSource wraps the underlying AudioSource
    sp<MediaCodecSource> audioEncoder = createAudioSource();
    if (audioEncoder == NULL) {
        return UNKNOWN_ERROR;
    }

    writer->addSource(audioEncoder);
    mAudioEncoderSource = audioEncoder;
    return OK;
}
```
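createAudioSource() is where the underlying AudioSource is actually created and wrapped. An abbreviated paraphrase (AOSP N; my elisions and comments, constructor details may differ slightly between versions):

```cpp
sp<MediaCodecSource> StagefrightRecorder::createAudioSource() {
    ....
    // The raw PCM producer, capturing from whatever mAudioSource selects
    sp<AudioSource> audioSource =
            new AudioSource(mAudioSource, mOpPackageName,
                            sourceSampleRate, mAudioChannels, mSampleRate);
    ....
    sp<AMessage> format = new AMessage;
    switch (mAudioEncoder) {
        case AUDIO_ENCODER_AAC:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
            break;
        ....
    }
    format->setInt32("channel-count", mAudioChannels);
    format->setInt32("sample-rate", mSampleRate);
    format->setInt32("bitrate", mAudioBitRate);
    ....
    // Wrap the raw source in a MediaCodecSource that encodes it
    sp<MediaCodecSource> audioEncoder =
            MediaCodecSource::Create(mLooper, format, audioSource);
    ....
    return audioEncoder;
}
```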

② setupMediaSource(&mediaSource);
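setupMediaSource() dispatches on mVideoSource: for the camera path it builds a CameraSource and returns it; for a Surface source there is nothing to build, since the encoder will pull frames from the surface directly. An abbreviated paraphrase (AOSP N, not verbatim):

```cpp
status_t StagefrightRecorder::setupMediaSource(
        sp<MediaSource> *mediaSource) {
    if (mVideoSource == VIDEO_SOURCE_DEFAULT
            || mVideoSource == VIDEO_SOURCE_CAMERA) {
        // Camera path: create the CameraSource and hand it back
        sp<CameraSource> cameraSource;
        status_t err = setupCameraSource(&cameraSource);
        if (err != OK) {
            return err;
        }
        *mediaSource = cameraSource;
    } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
        // Surface path: no explicit source; the encoder uses surface input
        *mediaSource = NULL;
    } else {
        return INVALID_OPERATION;
    }
    return OK;
}
```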

③ err = setupVideoEncoder(mediaSource, &encoder);

```cpp
status_t StagefrightRecorder::setupVideoEncoder(
        sp<MediaSource> cameraSource,
        sp<MediaCodecSource> *source) {
    source->clear();

    sp<AMessage> format = new AMessage();

    switch (mVideoEncoder) {
        .....
        case VIDEO_ENCODER_H264:
            // Set the mime field
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
            break;
        .......
        default:
            CHECK(!"Should not be here, unsupported video encoding.");
            break;
    }

    if (cameraSource != NULL) {
        sp<MetaData> meta = cameraSource->getFormat();

        int32_t width, height, stride, sliceHeight, colorFormat;
        CHECK(meta->findInt32(kKeyWidth, &width));
        CHECK(meta->findInt32(kKeyHeight, &height));
        CHECK(meta->findInt32(kKeyStride, &stride));
        CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
        CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));

        format->setInt32("width", width);
        format->setInt32("height", height);
        format->setInt32("stride", stride);
        format->setInt32("slice-height", sliceHeight);
        format->setInt32("color-format", colorFormat);
    } else {
        format->setInt32("width", mVideoWidth);
        format->setInt32("height", mVideoHeight);
        format->setInt32("stride", mVideoWidth);
        format->setInt32("slice-height", mVideoHeight);
        format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);

        // set up time lapse/slow motion for surface source
        if (mCaptureFpsEnable) {
            if (mTimeBetweenCaptureUs <= 0) {
                ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
                        (long long)mTimeBetweenCaptureUs);
                return BAD_VALUE;
            }
            format->setInt64("time-lapse", mTimeBetweenCaptureUs);
        }
    }

    format->setInt32("bitrate", mVideoBitRate);
    format->setInt32("frame-rate", mFrameRate);
    format->setInt32("i-frame-interval", mIFramesIntervalSec);

    if (mVideoTimeScale > 0) {
        format->setInt32("time-scale", mVideoTimeScale);
    }
    if (mVideoEncoderProfile != -1) {
        format->setInt32("profile", mVideoEncoderProfile);
    }
    if (mVideoEncoderLevel != -1) {
        format->setInt32("level", mVideoEncoderLevel);
    }

    uint32_t tsLayers = 1;
    bool preferBFrames = true; // we like B-frames as it produces better quality per bitrate
    format->setInt32("priority", 0 /* realtime */);
    float maxPlaybackFps = mFrameRate; // assume video is only played back at normal speed

    if (mCaptureFpsEnable) {
        format->setFloat("operating-rate", mCaptureFps);

        // enable layering for all time lapse and high frame rate recordings
        if (mFrameRate / mCaptureFps >= 1.9) { // time lapse
            preferBFrames = false;
            tsLayers = 2; // use at least two layers as resulting video will likely be sped up
        } else if (mCaptureFps > maxPlaybackFps) { // slow-mo
            maxPlaybackFps = mCaptureFps; // assume video will be played back at full capture speed
            preferBFrames = false;
        }
    }
    .......

    uint32_t flags = 0;
    if (cameraSource == NULL) {
        flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
    } else {
        // require dataspace setup even if not using surface input
        format->setInt32("android._using-recorder", 1);
    }

    // As with audio above, create the video MediaCodecSource for the
    // addSource() that follows
    sp<MediaCodecSource> encoder = MediaCodecSource::Create(
            mLooper, format, cameraSource, mPersistentSurface, flags);
    if (encoder == NULL) {
        ALOGE("Failed to create video encoder");
        // When the encoder fails to be created, we need
        // release the camera source due to the camera's lock
        // and unlock mechanism.
        if (cameraSource != NULL) {
            cameraSource->stop();
        }
        return UNKNOWN_ERROR;
    }

    if (cameraSource == NULL) {
        mGraphicBufferProducer = encoder->getGraphicBufferProducer();
    }

    // Return the encoder through the out-parameter
    *source = encoder;

    return OK;
}
```

What makes this confusing is the relationship among writer, MediaCodecSource, AudioSource, and CameraSource.

The class declarations below show what each of them is.

```cpp
class CameraSource : public MediaSource, public MediaBufferObserver

struct AudioSource : public MediaSource, public MediaBufferObserver

struct MediaCodecSource : public MediaSource,
                          public MediaBufferObserver

virtual status_t addSource(const sp<IMediaSource> &source);

class IMediaSource : public IInterface {
public:
    DECLARE_META_INTERFACE(MediaSource);
```
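All three, then, are MediaSources, and the writer consumes any of them through the same pull interface. The essential shape of that interface (abbreviated from MediaSource.h, my elisions):

```cpp
struct MediaSource : public virtual RefBase {
    // Start/stop producing buffers
    virtual status_t start(MetaData *params = NULL) = 0;
    virtual status_t stop() = 0;

    // Describes the buffers this source returns (mime, width, ...)
    virtual sp<MetaData> getFormat() = 0;

    // The pull call: the consumer (here the MPEG2TSWriter) calls read()
    // repeatedly to fetch the next buffer
    virtual status_t read(
            MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
    ....
};
```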

Digging deeper

Here is the logic behind the two encoder objects created above (audioEncoder for audio, encoder for video):

```cpp
sp<MediaCodecSource> audioEncoder =
        MediaCodecSource::Create(mLooper, format, audioSource);

sp<MediaCodecSource> encoder = MediaCodecSource::Create(
        mLooper, format, cameraSource, mPersistentSurface, flags);
```

So in both cases we pass an ALooper (AMessage is used for message delivery on it) and a MediaSource object: the audioSource in one call, the cameraSource in the other. The audio call simply leaves the trailing parameters at their defaults.

```cpp
sp<MediaCodecSource> MediaCodecSource::Create(
        const sp<ALooper> &looper,
        const sp<AMessage> &format,
        const sp<MediaSource> &source,
        const sp<IGraphicBufferConsumer> &consumer,
        uint32_t flags) {
    sp<MediaCodecSource> mediaSource =
            new MediaCodecSource(looper, format, source, consumer, flags);

    if (mediaSource->init() == OK) {
        return mediaSource;
    }
    return NULL;
}
```
```cpp
MediaCodecSource::MediaCodecSource(
        const sp<ALooper> &looper,
        const sp<AMessage> &outputFormat,
        const sp<MediaSource> &source,
        const sp<IGraphicBufferConsumer> &consumer,
        uint32_t flags)
    : mLooper(looper),
      mOutputFormat(outputFormat),
      mMeta(new MetaData),
      mFlags(flags),
      mIsVideo(false),
      mStarted(false),
      mStopping(false),
      mDoMoreWorkPending(false),
      mSetEncoderFormat(false),
      mEncoderFormat(0),
      mEncoderDataSpace(0),
      mGraphicBufferConsumer(consumer),
      mInputBufferTimeOffsetUs(0),
      mFirstSampleSystemTimeUs(-1ll),
      mPausePending(false),
      mFirstSampleTimeUs(-1ll),
      mGeneration(0) {
    CHECK(mLooper != NULL);

    AString mime;
    CHECK(mOutputFormat->findString("mime", &mime));

    if (!strncasecmp("video/", mime.c_str(), 6)) {
        mIsVideo = true;
    }

    // Key point: MediaCodecSource hands the audioSource/cameraSource we
    // passed in to the Puller's constructor
    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
        mPuller = new Puller(source);
    }
}
```
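The Puller is what drives the raw source: it runs on its own ALooper, repeatedly read()s buffers from the wrapped AudioSource or CameraSource, and notifies MediaCodecSource so each buffer can be queued into the encoder. Its declaration, paraphrased and abbreviated from MediaCodecSource.cpp (member names from memory, not verbatim):

```cpp
struct MediaCodecSource::Puller : public AHandler {
    explicit Puller(const sp<MediaSource> &source);

    // Starts the wrapped source and schedules the pull loop;
    // 'notify' is posted back to MediaCodecSource for every buffer read
    status_t start(const sp<MetaData> &meta, const sp<AMessage> &notify);
    void stop();
    ....

protected:
    // Handles the pull message: calls mSource->read(&mbuf) and forwards
    // the buffer through mNotify
    virtual void onMessageReceived(const sp<AMessage> &msg);

private:
    sp<MediaSource> mSource;
    sp<AMessage> mNotify;
    ....
};
```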