Android Audio Code Analysis 9 - The AudioTrack::write Function

In the earlier testWriteByte sample, after creating the AudioTrack object, we called its write function to play the audio.
Today we take a look at how write is implemented.
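Before diving in, here is roughly what the calling side looks like; a minimal sketch in the spirit of testWriteByte (the names, buffer sizes, and 8-bit format choice are mine), including a check of write()'s return value, which, as we will see, can be ERROR_INVALID_OPERATION or ERROR_BAD_VALUE:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class StreamModeSketch {
    void playStream(byte[] pcmData, int sampleRate) {
        int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT);

        // MODE_STREAM: data is pushed to the track chunk by chunk while playing.
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT,
                minBuf, AudioTrack.MODE_STREAM);

        track.play();
        int result = track.write(pcmData, 0, pcmData.length);
        if (result < 0) {
            // ERROR_INVALID_OPERATION or ERROR_BAD_VALUE, per the checks below.
        }
        track.release();
    }
}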


*****************************************Source*************************************************
    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {

        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (sizeInBytes > 0)) {
            mState = STATE_INITIALIZED;
        }

        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }

        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }

        return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat);
    }


**********************************************************************************************
Source path:
frameworks\base\media\java\android\media\AudioTrack.java


#################Notes###############################################
    /**
     * Writes the audio data to the audio hardware for playback.
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
     *    starts.
     * @param sizeInBytes the number of bytes to read in audioData after the offset.
     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
     *    the parameters don't resolve to valid data and indexes.
     */
// One thing worth noting in the javadoc: offsetInBytes is where, inside audioData,
// the data to be played starts.
    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (sizeInBytes > 0)) {
            mState = STATE_INITIALIZED;
        }

        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }

        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }

// Everything up to here is state and parameter checking; the real work is done
// in native code.
        return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat);

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Path: frameworks\base\core\jni\android_media_AudioTrack.cpp
// The corresponding native function is android_media_AudioTrack_native_write,
// implemented as follows:
static jint android_media_AudioTrack_native_write(JNIEnv *env,  jobject thiz,
                                                  jbyteArray javaAudioData,
                                                  jint offsetInBytes, jint sizeInBytes,
                                                  jint javaAudioFormat) {
    jbyte* cAudioData = NULL;
    AudioTrack *lpTrack = NULL;
    //LOGV("android_media_AudioTrack_native_write(offset=%d, sizeInBytes=%d) called",
    //    offsetInBytes, sizeInBytes);

    // get the audio track to load with samples
// When the AudioTrack object was created, its native pointer was stored in a
// field on the Java side; now that it is needed, it is fetched back out.
    lpTrack = (AudioTrack *)env->GetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for write()");
        return 0;
    }

    // get the pointer for the audio data from the java array
    if (javaAudioData) {
        cAudioData = (jbyte *)env->GetPrimitiveArrayCritical(javaAudioData, NULL);
        if (cAudioData == NULL) {
            LOGE("Error retrieving source of audio data to play, can't play");
            return 0; // out of memory or no data to load
        }
    } else {
        LOGE("NULL java array of audio data to play, can't play");
        return 0;
    }

    jint written = writeToTrack(lpTrack, javaAudioFormat, cAudioData, offsetInBytes, sizeInBytes);

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
jint writeToTrack(AudioTrack* pTrack, jint audioFormat, jbyte* data,
                  jint offsetInBytes, jint sizeInBytes) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
// This checks whether sharedBuffer() is 0.
// In stream mode (MODE_STREAM), sharedBuffer() is 0: no shared buffer is needed,
// because the data is pushed across a chunk at a time during playback.
// In static mode (MODE_STATIC), there is a shared buffer: the data is written
// over once up front, and later playback simply reads from that shared memory.
    if (pTrack->sharedBuffer() == 0) {
// Stream mode: call the native AudioTrack object's write function directly.
        written = pTrack->write(data + offsetInBytes, sizeInBytes);
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
// If a shared buffer exists, we should never have ended up here.
    if (mSharedBuffer != 0) return INVALID_OPERATION;

// Never trust the caller.
    if (ssize_t(userSize) < 0) {
        // sanity-check. user is most-likely passing an error code.
        LOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
                buffer, userSize, userSize);
        return BAD_VALUE;
    }

    LOGV("write %p: %d bytes, mActive=%d", this, userSize, mActive);

    ssize_t written = 0;
    const int8_t *src = (const int8_t *)buffer;
    Buffer audioBuffer;

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The definition of Buffer:
    /* Create Buffer on the stack and pass it to obtainBuffer()
     * and releaseBuffer().
     */
    class Buffer
    {
    public:
        enum {
            MUTE    = 0x00000001
        };
        uint32_t    flags;
        int         channelCount;
        int         format;
        size_t      frameCount;
        size_t      size;
        union {
            void*       raw;
            short*      i16;
            int8_t*     i8;
        };
    };
// ----------------------------------------------------------------

    do {
        audioBuffer.frameCount = userSize/frameSize();

        // Calling obtainBuffer() with a negative wait count causes
        // an (almost) infinite wait time.
// Obtain a buffer to write the data into.
        status_t err = obtainBuffer(&audioBuffer, -1);
        if (err < 0) {
            // out of buffers, return #bytes written
            if (err == status_t(NO_MORE_BUFFERS))
                break;
            return ssize_t(err);
        }
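// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// A quick note on the byte/frame math in the loop above: a frame is one sample
// for every channel, so for linear PCM frameSize() is channelCount times
// bytes-per-sample. A toy Java illustration (names and numbers are mine, not
// from the source):
public class FrameMath {
    static int frameSize(int channelCount, int bytesPerSample) {
        return channelCount * bytesPerSample;
    }

    public static void main(String[] args) {
        int userSize = 4096;                 // bytes handed to write()
        int fs = frameSize(2, 2);            // 16-bit stereo: 4 bytes per frame
        System.out.println(userSize / fs);   // 1024 frames requested from obtainBuffer
    }
}
// ----------------------------------------------------------------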
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    int active;
    status_t result;
    audio_track_cblk_t* cblk = mCblk;
    uint32_t framesReq = audioBuffer->frameCount;
    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;

    audioBuffer->frameCount  = 0;
    audioBuffer->size = 0;

// So what exactly is audio_track_cblk_t? It is in fact rather important, and we
// have run into it before. Let's pin down exactly where it is born.
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/*
The audio_track_cblk_t object is created when the AudioTrack itself is created.
The AudioTrack constructor has been covered already, so we pick up the call
chain from there:

1. The AudioTrack constructor calls AudioTrack::set.
    mStatus = set(streamType, sampleRate, format, channels,
            0, flags, cbf, user, notificationFrames,
            sharedBuffer, false, sessionId);
2. AudioTrack::set calls AudioTrack::createTrack.
    // create the IAudioTrack
    status_t status = createTrack(streamType, sampleRate, format, channelCount,
                                  frameCount, flags, sharedBuffer, output, true);
3. AudioTrack::createTrack calls AudioFlinger::createTrack,
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);
   and assigns the member variable mCblk:
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    mCblk->flags |= CBLK_DIRECTION_OUT;
4. AudioFlinger::createTrack calls AudioFlinger::PlaybackThread::createTrack_l.
        track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, lSessionId, &lStatus);
5. AudioFlinger::PlaybackThread::createTrack_l creates an
   AudioFlinger::PlaybackThread::Track object.
        track = new Track(this, client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, sessionId);
6. AudioFlinger::PlaybackThread::Track is a subclass of
   AudioFlinger::ThreadBase::TrackBase.
7. The final birthplace is in the constructor of AudioFlinger::ThreadBase::TrackBase:
mCblkMemory = client->heap()->allocate(size);
mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
*/
// ----------------------------------------------------------------

    uint32_t framesAvail = cblk->framesAvailable();

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t audio_track_cblk_t::framesAvailable()
{
    Mutex::Autolock _l(lock);
    return framesAvailable_l();

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t audio_track_cblk_t::framesAvailable_l()
{
    uint64_t u = this->user;
    uint64_t s = this->server;

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#define CBLK_DIRECTION_MSK      0x0002
#define CBLK_DIRECTION_OUT      0x0002  // this cblk is for an AudioTrack
#define CBLK_DIRECTION_IN       0x0000  // this cblk is for an AudioRecord
// ----------------------------------------------------------------
// Note that CBLK_DIRECTION_MSK and CBLK_DIRECTION_OUT have the same value, so
// testing CBLK_DIRECTION_MSK is effectively testing for CBLK_DIRECTION_OUT.
// We are using this cblk for playback, so the direction here is CBLK_DIRECTION_OUT.
    if (flags & CBLK_DIRECTION_MSK) {
        uint64_t limit = (s < loopStart) ? s : loopStart;
        return limit + frameCount - u;
    } else {
        return frameCount + u - s;
    }
}
// ----------------------------------------------------------------
}
// ----------------------------------------------------------------
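// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The formula is easier to see with numbers. user counts the total frames the
// app has written, server counts the total frames AudioFlinger has consumed,
// and when no loop has been set loopStart sits beyond any reachable position,
// so limit collapses to server. A toy Java illustration (values invented):
public class FramesAvailableDemo {
    public static void main(String[] args) {
        long frameCount = 1024; // capacity of the cblk buffer, in frames
        long user = 1536;       // total frames written so far
        long server = 900;      // total frames consumed so far

        // Playback path, no loop set: limit + frameCount - user with
        // limit == server. 1536 - 900 = 636 frames are still queued,
        // so 1024 - 636 = 388 frames of space remain.
        long available = server + frameCount - user;
        System.out.println("framesAvailable = " + available); // 388
    }
}
// ----------------------------------------------------------------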
// obtainBuffer loops here until framesAvail becomes non-zero.
    if (framesAvail == 0) {
        cblk->lock.lock();
        goto start_loop_here;
        while (framesAvail == 0) {
            active = mActive;
            if (UNLIKELY(!active)) {
                LOGV("Not active and NO_MORE_BUFFERS");
                cblk->lock.unlock();
                return NO_MORE_BUFFERS;
            }
            if (UNLIKELY(!waitCount)) {
                cblk->lock.unlock();
                return WOULD_BLOCK;
            }
            if (!(cblk->flags & CBLK_INVALID_MSK)) {
                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
            }
            if (cblk->flags & CBLK_INVALID_MSK) {
                LOGW("obtainBuffer() track %p invalidated, creating a new one", this);
                // no need to clear the invalid flag as this cblk will not be used anymore
                cblk->lock.unlock();
                goto create_new_track;
            }
            if (__builtin_expect(result!=NO_ERROR, false)) {
                cblk->waitTimeMs += waitTimeMs;
                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                    // timing out when a loop has been set and we have already written upto loop end
                    // is a normal condition: no need to wake AudioFlinger up.
                    if (cblk->user < cblk->loopEnd) {
                        LOGW(   "obtainBuffer timed out (is the CPU pegged?) %p "
                                "user=%08llx, server=%08llx", this, cblk->user, cblk->server);
                        //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
                        cblk->lock.unlock();
                        result = mAudioTrack->start();
                        if (result == DEAD_OBJECT) {
                            LOGW("obtainBuffer() dead IAudioTrack: creating a new one");
create_new_track:
                            result = createTrack(mStreamType, cblk->sampleRate, mFormat, mChannelCount,
                                                 mFrameCount, mFlags, mSharedBuffer, getOutput(), false);
                            if (result == NO_ERROR) {
                                cblk = mCblk;
                                cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
                                mAudioTrack->start();
                            }
                        }
                        cblk->lock.lock();
                    }
                    cblk->waitTimeMs = 0;
                }
                if (--waitCount == 0) {
                    cblk->lock.unlock();
                    return TIMED_OUT;
                }
            }
            // read the server count again
        start_loop_here:
            framesAvail = cblk->framesAvailable_l();
        }
        cblk->lock.unlock();
    }

    // restart track if it was disabled by audioflinger due to previous underrun
    if (cblk->flags & CBLK_DISABLED_MSK) {
        cblk->flags &= ~CBLK_DISABLED_ON;
        LOGW("obtainBuffer() track %p disabled, restarting", this);
        mAudioTrack->start();
    }

    cblk->waitTimeMs = 0;

// The request is clamped to what is available, and then to the end of the
// current buffer window, so fewer frames than requested may be granted.
    if (framesReq > framesAvail) {
        framesReq = framesAvail;
    }

    uint64_t u = cblk->user;
    uint64_t bufferEnd = cblk->userBase + cblk->frameCount;

    if (u + framesReq > bufferEnd) {
        framesReq = (uint32_t)(bufferEnd - u);
    }
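// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Continuing with toy numbers: a single obtainBuffer() call can grant fewer
// contiguous frames than are actually free, because the free space may wrap
// past the end of the current buffer window. That shortfall is exactly why
// AudioTrack::write loops. A sketch of the two clamps (numbers are mine):
public class ClampDemo {
    public static void main(String[] args) {
        long frameCount = 1024, userBase = 1024;
        long user = 1900, server = 1500;

        long framesAvail = server + frameCount - user; // 624 frames free
        long framesReq = 700;                          // frames requested

        if (framesReq > framesAvail) framesReq = framesAvail;           // -> 624

        // Only a contiguous run up to the window end can be handed out:
        long bufferEnd = userBase + frameCount;                         // 2048
        if (user + framesReq > bufferEnd) framesReq = bufferEnd - user; // -> 148

        System.out.println("framesReq = " + framesReq);
    }
}
// ----------------------------------------------------------------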
    audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
    audioBuffer->channelCount = mChannelCount;
    audioBuffer->frameCount = framesReq;
    audioBuffer->size = framesReq * cblk->frameSize;
    if (AudioSystem::isLinearPCM(mFormat)) {
        audioBuffer->format = AudioSystem::PCM_16_BIT;
    } else {
        audioBuffer->format = mFormat;
    }
    audioBuffer->raw = (int8_t *)cblk->buffer(u);

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void* audio_track_cblk_t::buffer(uint64_t offset) const
{
    return (int8_t *)this->buffers + (offset - userBase) * this->frameSize;
}
// ----------------------------------------------------------------

    active = mActive;
    return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
// ----------------------------------------------------------------

        size_t toWrite;
        if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size>>1;
            // 8 to 16 bit conversion
            int count = toWrite;
            int16_t *dst = (int16_t *)(audioBuffer.i8);
            while(count--) {
                *dst++ = (int16_t)(*src++^0x80) << 8;
            }
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, src, toWrite);
            src += toWrite;
        }
        userSize -= toWrite;
        written += toWrite;
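// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The (*src++ ^ 0x80) << 8 idiom converts unsigned 8-bit PCM, whose silence
// point is 0x80, into signed 16-bit PCM with silence at 0: the XOR recenters
// the sample around zero and the shift scales it to 16-bit range. A quick
// Java check of the same math (throwaway code of mine, not from the source):
public class Pcm8To16Demo {
    // Produces the same 16-bit values as the native idiom above.
    static short expand(byte u8) {
        return (short) (((u8 & 0xFF) ^ 0x80) << 8);
    }

    public static void main(String[] args) {
        System.out.println(expand((byte) 0x80)); // 8-bit silence -> 0
        System.out.println(expand((byte) 0xFF)); // max sample    -> 32512 (0x7F00)
        System.out.println(expand((byte) 0x00)); // min sample    -> -32768 (0x8000)
    }
}
// ----------------------------------------------------------------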
        releaseBuffer(&audioBuffer);

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    audio_track_cblk_t* cblk = mCblk;
    cblk->stepUser(audioBuffer->frameCount);

// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint64_t audio_track_cblk_t::stepUser(uint32_t frameCount)
{
    uint64_t u = this->user;

    u += frameCount;
    // Ensure that user is never ahead of server for AudioRecord
    if (flags & CBLK_DIRECTION_MSK) {
        // If stepServer() has been called once, switch to normal obtainBuffer() timeout period
        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
            bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
        }
    } else if (u > this->server) {
        LOGW("stepServer occured after track reset");
        u = this->server;
    }

    if (u >= userBase + this->frameCount) {
        userBase += this->frameCount;
    }

    this->user = u;

    // Clear flow control error condition as new data has been written/read to/from buffer.
    flags &= ~CBLK_UNDERRUN_MSK;

    return u;
}
// ----------------------------------------------------------------
}
// ----------------------------------------------------------------

    } while (userSize);

    return written;
}
// ----------------------------------------------------------------
    } else {
// Static mode (MODE_STATIC): copy the data into the shared memory.
// Note that 8-bit PCM data needs extra handling on the way in.
        if (audioFormat == javaAudioTrackFields.PCM16) {
            // writing to shared memory, check for capacity
            if ((size_t)sizeInBytes > pTrack->sharedBuffer()->size()) {
                sizeInBytes = pTrack->sharedBuffer()->size();
            }
            memcpy(pTrack->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);
            written = sizeInBytes;
        } else if (audioFormat == javaAudioTrackFields.PCM8) {
            // data contains 8bit data we need to expand to 16bit before copying
            // to the shared memory
            // writing to shared memory, check for capacity,
            // note that input data will occupy 2X the input space due to 8 to 16bit conversion
            if (((size_t)sizeInBytes)*2 > pTrack->sharedBuffer()->size()) {
                sizeInBytes = pTrack->sharedBuffer()->size() / 2;
            }
            int count = sizeInBytes;
            int16_t *dst = (int16_t *)pTrack->sharedBuffer()->pointer();
            const int8_t *src = (const int8_t *)(data + offsetInBytes);
            while(count--) {
                *dst++ = (int16_t)(*src++^0x80) << 8;
            }
            // even though we wrote 2*sizeInBytes, we only report sizeInBytes as written to hide
            // the 8bit mixer restriction from the user of this function
            written = sizeInBytes;
        }
    }
    return written;
}
// ----------------------------------------------------------------

    env->ReleasePrimitiveArrayCritical(javaAudioData, cAudioData, 0);

    //LOGV("write wrote %d (tried %d) bytes in the native AudioTrack with offset %d",
    //     (int)written, (int)(sizeInBytes), (int)offsetInBytes);
    return written;
}
// ----------------------------------------------------------------
    }
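For contrast with the stream-mode path at the top of writeToTrack, this is roughly how the static-mode branch above gets exercised from the Java side; again a minimal sketch with invented names and parameters:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class StaticModeSketch {
    void playClip(byte[] pcm16Clip, int sampleRate) {
        // MODE_STATIC: the whole clip is copied once into the track's shared
        // memory (the else-branch of writeToTrack above), then played from there.
        AudioTrack track = new AudioTrack(
                AudioManager.STREAM_MUSIC,
                sampleRate,
                AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                pcm16Clip.length,            // buffer size: the entire clip
                AudioTrack.MODE_STATIC);

        track.write(pcm16Clip, 0, pcm16Clip.length); // one-time copy into shared memory
        track.play();                                // playback reads from shared memory
    }
}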


###################################################################


&&&&&&&&&&&Summary&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1. The playback data ultimately ends up being written into an audio_track_cblk_t structure.
2. The audio_track_cblk_t structure is created in the constructor of AudioFlinger's TrackBase class:
    a block of memory is first allocated from the Client's heap, and that address is then cast to an audio_track_cblk_t pointer.
    The last member of audio_track_cblk_t is the pointer to the actual data (see the toy model after this summary).
3. At this point the data has merely been handed over to AudioFlinger; how AudioFlinger consumes it to actually produce playback is something we still need to study.
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
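To tie the summary together, here is a toy model, in plain Java with invented names, of the cblk write-side bookkeeping: user grows monotonically as frames are written, userBase slides forward one whole buffer at a time (mirroring stepUser above), and buffer(offset) maps the monotonic position back into the fixed-size shared-memory region. This is a sketch of the idea only, not the real implementation:

public class CblkModel {
    final int frameCount = 1024;    // capacity in frames
    final int frameSize = 4;        // e.g. 16-bit stereo
    long user = 0;                  // total frames written (monotonic)
    long userBase = 0;              // start of the current buffer window

    // Mirrors audio_track_cblk_t::buffer(): map a monotonic frame position
    // to a byte offset inside the fixed shared-memory region.
    int byteOffset(long offset) {
        return (int) ((offset - userBase) * frameSize);
    }

    // Mirrors the wrap logic in stepUser(): advance user, then slide
    // userBase forward by a whole buffer once user crosses the window end.
    void stepUser(int frames) {
        user += frames;
        if (user >= userBase + frameCount) {
            userBase += frameCount;
        }
    }

    public static void main(String[] args) {
        CblkModel m = new CblkModel();
        m.stepUser(1000);
        System.out.println(m.byteOffset(m.user)); // 4000: near the window end
        m.stepUser(100);                          // crosses frameCount = 1024
        System.out.println(m.userBase);           // 1024: window slid forward
        System.out.println(m.byteOffset(m.user)); // (1100 - 1024) * 4 = 304
    }
}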