AudioTrack Initialization Study Notes (Deep Dive into Android)


The AudioTrack class manages and plays audio resources for Java applications. It streams PCM audio buffers to the audio hardware for playback. Audio data is pushed to an AudioTrack object with the write(byte[], int, int) or write(short[], int, int) methods.

An AudioTrack instance operates in one of two modes: static or streaming.

In streaming mode, the application writes a continuous stream of data to the AudioTrack with write(). The call blocks and returns once the data has been transferred from the Java layer to the native layer and queued for playback. Streaming mode is useful for playing blocks of audio data when:

  • the sound is too long to hold in memory because of its playback duration,
  • the sound is too big to hold in memory because of the characteristics of the audio data (high sampling rate, bits per sample, ...),
  • new audio data is received or generated while previously queued audio is still playing.
Static mode is intended for short sounds that fit in memory and must be played with the smallest possible latency, which makes it a good fit for UI and game sound effects.

Depending on how it is created, an AudioTrack object decides whether to initialize its audio buffer. The size of this buffer, specified at construction time, determines how long an AudioTrack can play before running out of data.

For an AudioTrack in static mode, the buffer size is the maximum length of sound it can play.

For an AudioTrack in streaming mode, data is written to the hardware in chunks smaller than the total buffer size.
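
For orientation before diving into the native code, here is a minimal sketch of pushing PCM into a streaming-mode AudioTrack at the C++ level, written against the legacy constructor whose parameters mirror the set() signature quoted below. The specific constants (AudioSystem::MUSIC, PCM_16_BIT, CHANNEL_OUT_STEREO) and the zero defaults are my assumptions for this era of the API, not something taken from the post.

// Minimal sketch, assuming the legacy native AudioTrack API of this era.
#include <media/AudioTrack.h>
#include <media/AudioSystem.h>

using namespace android;

void playSilenceSketch()
{
    AudioTrack track(AudioSystem::MUSIC,               // streamType
                     44100,                            // sampleRate
                     AudioSystem::PCM_16_BIT,          // format
                     AudioSystem::CHANNEL_OUT_STEREO,  // channels
                     0,                                // frameCount: 0 lets set() pick a default
                     0,                                // flags
                     NULL, NULL,                       // no callback: pure push (write()) mode
                     0,                                // notificationFrames
                     0);                               // sessionId
    if (track.initCheck() != NO_ERROR) {               // set() failed, e.g. no output available
        return;
    }
    track.start();
    int16_t pcm[1024 * 2] = {0};                       // a short chunk of stereo silence
    track.write(pcm, sizeof(pcm));                     // blocks until copied into the shared buffer
    track.stop();
}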


AudioTrack->set()@AudioTrack.cpp

status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channels,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{
    ......
    uint32_t channelCount = AudioSystem::popCount(channels);
    audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
            sampleRate, format, channels, (AudioSystem::output_flags)flags);
    ......
    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0;
    mFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mSessionId = sessionId;
    mAuxEffectId = 0;
    // create the IAudioTrack
    status_t status = createTrack(streamType, sampleRate, format, channelCount,   // Step 1
                                  frameCount, flags, sharedBuffer, output, true);
    ......
    if (cbf != 0) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);       // Step 2
        if (mAudioTrackThread == 0) {
            LOGE("Could not create callback thread");
            return NO_INIT;
        }
    }
    ......
    mSharedBuffer = sharedBuffer;                                                 // Step 3
    ......
    mCbf = cbf;
    mUserData = user;
    ......
    mAudioSession = -1;
    return NO_ERROR;
}
AudioTrack->createTrack()@AudioTrack.cpp
status_t AudioTrack::createTrack(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        bool enforceFrameCount)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); // get the AudioFlinger service
    ......
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);
    ......
    sp<IMemory> cblk = track->getCblk();
    ......
    mAudioTrack.clear();
    mAudioTrack = track;
    mCblkMemory.clear();
    mCblkMemory = cblk;
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer()); // base address of the shared AudioTrack buffer
    mCblk->flags |= CBLK_DIRECTION_OUT;
    if (sharedBuffer == 0) {
        mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
    } else {
        mCblk->buffers = sharedBuffer->pointer();
        // Force buffer full condition as data is already present in shared memory
        mCblk->stepUser(mCblk->frameCount);
    }
    ......
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    ......
    return NO_ERROR;
}
AudioFlinger->createTrack()@AudioFlinger.cpp
sp<IAudioTrack> AudioFlinger::createTrack(
        pid_t pid,
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        int output,
        int *sessionId,
        status_t *status)
{
    sp<PlaybackThread::Track> track;
    sp<TrackHandle> trackHandle;
    sp<Client> client;
    wp<Client> wclient;
    status_t lStatus;
    int lSessionId;
    {
        Mutex::Autolock _l(mLock);
        PlaybackThread *thread = checkPlaybackThread_l(output); // look up the PlaybackThread for this output
        PlaybackThread *effectThread = NULL;
        if (thread == NULL) {
            LOGE("unknown output thread");
            lStatus = BAD_VALUE;
            goto Exit;
        }
        wclient = mClients.valueFor(pid);
        if (wclient != NULL) {
            client = wclient.promote();
        } else {
            client = new Client(this, pid); // creates a Client with a 1024*1024-byte MemoryHeap
            mClients.add(pid, client);      // record the (pid, client) pair
        }
        LOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId); // sessionId is not NULL here
        if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
            for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
                sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
                if (mPlaybackThreads.keyAt(i) != output) {
                    // prevent same audio session on different output threads
                    uint32_t sessions = t->hasAudioSession(*sessionId);
                    if (sessions & PlaybackThread::TRACK_SESSION) {
                        lStatus = BAD_VALUE;
                        goto Exit;
                    }
                    // check if an effect with same session ID is waiting for a track to be created
                    if (sessions & PlaybackThread::EFFECT_SESSION) {
                        effectThread = t.get();
                    }
                }
            }
            lSessionId = *sessionId;
        } else {
            // if no audio session id is provided, create one here
            lSessionId = nextUniqueId(); // new id assigned to *sessionId below
            if (sessionId != NULL) {
                *sessionId = lSessionId;
            }
        }
        track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, lSessionId, &lStatus);
        // move effect chain to this output thread if an effect on same session was waiting
        // for a track to be created
        if (lStatus == NO_ERROR && effectThread != NULL) {
            Mutex::Autolock _dl(thread->mLock);
            Mutex::Autolock _sl(effectThread->mLock);
            moveEffectChain_l(lSessionId, effectThread, thread, true);
        }
    }
    if (lStatus == NO_ERROR) {
        trackHandle = new TrackHandle(track); // wrap the Track in a TrackHandle
    } else {
        // remove local strong reference to Client before deleting the Track so that the Client
        // destructor is called by the TrackBase destructor with mLock held
        client.clear();
        track.clear();
    }
Exit:
    if (status) {
        *status = lStatus;
    }
    return trackHandle;
}

AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l()

AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(int output) const
{
    PlaybackThread *thread = NULL;
    if (mPlaybackThreads.indexOfKey(output) >= 0) {
        // entries were added via mPlaybackThreads.add(id, thread) in openOutput():
        // the key is the output handle, the value is the MixerThread
        thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();
    }
    return thread;
}
class Client

AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
    :   RefBase(),
        mAudioFlinger(audioFlinger),
        mMemoryDealer(new MemoryDealer(1024*1024, "AudioFlinger::Client")),
        mPid(pid)
{
    // 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer
}
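The sizing in that comment works out exactly: 32 tracks × 8 buffers × 4 KB per buffer = 1024 KB, i.e. the 1024*1024 bytes handed to the MemoryDealer. Every track created for the same pid is later carved out of this one per-client heap.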
PlaybackThread::createTrack_l()

sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
        const sp<AudioFlinger::Client>& client,
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        const sp<IMemory>& sharedBuffer,
        int sessionId,
        status_t *status)
{
    sp<Track> track;
    status_t lStatus;
    ......
    { // scope for mLock
        Mutex::Autolock _l(mLock);
        // all tracks in same audio session must share the same routing strategy otherwise
        // conflicts will happen when tracks are moved from one output to another by audio policy
        // manager
        uint32_t strategy =
                AudioSystem::getStrategyForStream((AudioSystem::stream_type)streamType);
        for (size_t i = 0; i < mTracks.size(); ++i) {
            sp<Track> t = mTracks[i];
            if (t != 0) {
                if (sessionId == t->sessionId() &&
                        strategy != AudioSystem::getStrategyForStream((AudioSystem::stream_type)t->type())) {
                    lStatus = BAD_VALUE;
                    goto Exit;
                }
            }
        }
        track = new Track(this, client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, sessionId);
        ......
    }
    lStatus = NO_ERROR;
Exit:
    if (status) {
        *status = lStatus;
    }
    return track;
}
class Track

AudioFlinger::PlaybackThread::Track::Track(
            const wp<ThreadBase>& thread,
            const sp<Client>& client,
            int streamType,
            uint32_t sampleRate,
            int format,
            int channelCount,
            int frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId)
    :   TrackBase(thread, client, sampleRate, format, channelCount, frameCount, 0, sharedBuffer, sessionId),
        mMute(false), mSharedBuffer(sharedBuffer), mName(-1), mMainBuffer(NULL), mAuxBuffer(NULL),
        mAuxEffectId(0), mHasVolumeController(false)
{
    if (mCblk != NULL) {
        sp<ThreadBase> baseThread = thread.promote();
        if (baseThread != 0) {
            PlaybackThread *playbackThread = (PlaybackThread *)baseThread.get();
            mName = playbackThread->getTrackName_l();
            mMainBuffer = playbackThread->mixBuffer();
        }
        ......
        LOGV("Track constructor name %d, calling thread %d", mName, IPCThreadState::self()->getCallingPid());
        if (mName < 0) {
            LOGE("no more track names available");
        }
        ......
        // NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
        // 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
        mCblk->frameSize = AudioSystem::isLinearPCM(format) ? channelCount * sizeof(int16_t) : sizeof(int8_t);
    }
    ......
}
AudioFlinger::ThreadBase::TrackBase::TrackBase

AudioFlinger::ThreadBase::TrackBase::TrackBase(
            const wp<ThreadBase>& thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            int format,
            int channelCount,
            int frameCount,
            uint32_t flags,
            const sp<IMemory>& sharedBuffer,
            int sessionId)
    :   RefBase(),
        mThread(thread),
        mClient(client),
        mCblk(0),
        mFrameCount(0),
        mState(IDLE),
        mClientTid(-1),
        mFormat(format),
        mFlags(flags & ~SYSTEM_FLAGS_MASK),
        mSessionId(sessionId)
{
    LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
    // LOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    size_t size = sizeof(audio_track_cblk_t);
    size_t bufferSize = 0;
    if ( (format == AudioSystem::PCM_16_BIT) ||
         (format == AudioSystem::PCM_8_BIT) )
    {
        bufferSize = frameCount*channelCount*sizeof(int16_t);
    }
    else if (format == AudioSystem::AMR_NB)
    {
        bufferSize = frameCount*channelCount*32; // full rate frame size
    }
    ......
    if (sharedBuffer == 0) {
        size += bufferSize;
    }
    if (client != NULL) {
        mCblkMemory = client->heap()->allocate(size); // carve the block out of the client's existing heap
        if (mCblkMemory != 0) {
            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
            if (mCblk) { // construct the shared structure in-place.
                new(mCblk) audio_track_cblk_t(); // placement new: the audio control-block header
                // clear all buffers
                mCblk->frameCount = frameCount;
                mCblk->sampleRate = sampleRate;
                mCblk->channelCount = (uint8_t)channelCount;
                if (sharedBuffer == 0) {
                    mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); // start of the audio data area
                    // Change for Codec type
                    if ( (format == AudioSystem::PCM_16_BIT) ||
                         (format == AudioSystem::PCM_8_BIT) )
                    {
                        memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
                    }
                    else if (format == AudioSystem::AMR_NB)
                    {
                        memset(mBuffer, 0, frameCount*channelCount*32); // full rate frame size
                    }
                    ......
                    // Force underrun condition to avoid false underrun callback until first data is
                    // written to buffer (other flags are cleared)
                    mCblk->flags = CBLK_UNDERRUN_ON;
                } else {
                    mBuffer = sharedBuffer->pointer();
                }
                mBufferEnd = (uint8_t *)mBuffer + bufferSize; // end of the audio data area
            }
            ......
        }
        ......
    }
    ......
}
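To make the size computation concrete (the numbers here are illustrative, not from the post): for 16-bit stereo PCM with frameCount = 4096, bufferSize = 4096 * 2 * sizeof(int16_t) = 16384 bytes. Since sharedBuffer == 0 in streaming mode, the block allocated from the client's heap is sizeof(audio_track_cblk_t) + 16384 bytes, so the control block and the PCM data it describes sit in one contiguous piece of shared memory.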
audio_track_cblk_t

struct audio_track_cblk_t
{
    // The data members are grouped so that members accessed frequently and in the same context
    // are in the same line of data cache.
                Mutex       lock;
                Condition   cv;
    volatile    uint32_t    user;
    volatile    uint32_t    server;
                uint32_t    userBase;
                uint32_t    serverBase;
                void*       buffers;
                uint32_t    frameCount;
                // Cache line boundary
                uint32_t    loopStart;
                uint32_t    loopEnd;
                int         loopCount;
    volatile    union {
                    uint16_t    volume[2];
                    uint32_t    volumeLR;
                };
                uint32_t    sampleRate;
                // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
                // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
                // 16 bit because data is converted to 16 bit before being stored in buffer
                uint8_t     frameSize;
                uint8_t     channelCount;
                uint16_t    flags;
                uint16_t    bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
                uint16_t    waitTimeMs;      // Cumulated wait time
                uint16_t    sendLevel;
                uint16_t    reserved;
                // Cache line boundary (32 bytes)
                            audio_track_cblk_t();
                uint32_t    stepUser(uint32_t frameCount);
                bool        stepServer(uint32_t frameCount);
                void*       buffer(uint32_t offset) const;
                uint32_t    framesAvailable();
                uint32_t    framesAvailable_l();
                uint32_t    framesReady();
};
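The user/server pair is the heart of this shared-memory ring buffer: the client side advances user as it writes frames, while AudioFlinger advances server as the mixer consumes them. The sketch below is a deliberately simplified model of that bookkeeping. It ignores the locking, loop points, and the userBase/serverBase wraparound handling of the real stepUser()/stepServer(); it is only meant to show how two monotonically increasing counters describe a ring buffer.

// Simplified illustration (not the real audio_track_cblk_t code) of how a producer
// ("user", the AudioTrack side) and a consumer ("server", the AudioFlinger side)
// share one buffer through two monotonically increasing frame counters.
#include <stdint.h>

struct MiniCblk {
    volatile uint32_t user;        // frames written so far by the client
    volatile uint32_t server;      // frames consumed so far by the mixer
    uint32_t          frameCount;  // capacity of the ring buffer in frames
};

// Frames the client may still write without overwriting unplayed data.
uint32_t framesAvailable(const MiniCblk& c) {
    return c.frameCount - (c.user - c.server);
}

// Frames the mixer can read right now.
uint32_t framesReady(const MiniCblk& c) {
    return c.user - c.server;
}

// Producer step: claim 'n' frames and return the write offset within the buffer.
uint32_t stepUser(MiniCblk& c, uint32_t n) {
    uint32_t offset = c.user % c.frameCount;  // the real code tracks this via userBase instead of %
    c.user += n;
    return offset;
}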
int AudioFlinger::MixerThread::getTrackName_l()
int AudioFlinger::MixerThread::getTrackName_l()
{
    return mAudioMixer->getTrackName();
}
int AudioMixer::getTrackName()
int AudioMixer::getTrackName()
{
    uint32_t names = mTrackNames;
    uint32_t mask = 1;
    int n = 0;
    while (names & mask) {
        mask <<= 1;
        n++;
    }
    if (mask) {
        LOGV("add track (%d)", n);
        mTrackNames |= mask;
        return TRACK0 + n;
    }
    return -1;
}
int16_t     *mixBuffer()

int16_t *mixBuffer() { return mMixBuffer; };
mMixBuffer is initialized in readOutputParameters(), which is called from the PlaybackThread constructor.

void AudioFlinger::PlaybackThread::readOutputParameters()

void AudioFlinger::PlaybackThread::readOutputParameters()
{
    ......
    // FIXME - Current mixer implementation only supports stereo output: Always
    // Allocate a stereo buffer even if HW output is mono.
    if (mMixBuffer != NULL) delete[] mMixBuffer;
    mMixBuffer = new int16_t[mFrameCount * 2];
    memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
    ......
}
class TrackHandle

AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
    : BnAudioTrack(),
      mTrack(track)
{
}
TrackHandle follows the Proxy pattern: the real work is done by AudioFlinger::PlaybackThread::Track, whose output ultimately reaches the underlying audio driver and device.
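A self-contained sketch of that proxy shape (illustrative only, with stand-in names; it is not the framework code): the Binder-facing handle performs no audio work itself, it simply delegates every call to the worker it wraps, exactly as TrackHandle delegates to PlaybackThread::Track.

// Illustration of the Proxy pattern used by TrackHandle.
#include <memory>
#include <cstdio>

struct Worker {                       // stands in for PlaybackThread::Track
    void start() { std::printf("track started\n"); }
    void stop()  { std::printf("track stopped\n"); }
};

class Handle {                        // stands in for TrackHandle (the BnAudioTrack side)
public:
    explicit Handle(std::shared_ptr<Worker> w) : mTrack(std::move(w)) {}
    void start() { mTrack->start(); } // pure delegation, no logic of its own
    void stop()  { mTrack->stop(); }
private:
    std::shared_ptr<Worker> mTrack;
};

int main() {
    Handle h(std::make_shared<Worker>());
    h.start();
    h.stop();
    return 0;
}

The getCblk() forwarding quoted below follows the same pattern.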

AudioTrack->createTrack() ends up in AudioFlinger->createTrack() and gets back an AudioFlinger::TrackHandle. mAudioTrack stores this handle, and from then on AudioTrack relies on it to cooperate with the AudioFlinger layer below, e.g. when calling start(), stop(), and so on.

mCblkMemory records the base address of the shared buffer.

track->getCblk()

sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
    return mTrack->getCblk();
}
sp<IMemory> getCblk() const { return mCblkMemory; } // TrackBase::getCblk(), defined inline in AudioFlinger.h
mCblkMemory in TrackBase is initialized in the TrackBase constructor via mCblkMemory = client->heap()->allocate(size);. The client here is the Client object shown earlier, with its pre-allocated 1 MB heap.
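To make that shared-memory layout concrete, here is a small standalone sketch (the names MiniHeader and allocateTrackBlockSketch are mine, not from the post, and the header path may vary by Android release) of the same idea TrackBase uses: carve one IMemory block out of a MemoryDealer heap, placement-new the control block at its start, and treat the bytes right after the header as the audio data area.

// Standalone sketch of the TrackBase allocation pattern: one shared block =
// in-place constructed control-block header + audio data area right behind it.
#include <new>
#include <string.h>
#include <stdint.h>
#include <binder/MemoryDealer.h>
#include <binder/IMemory.h>

using namespace android;

struct MiniHeader {                // stand-in for audio_track_cblk_t
    uint32_t frameCount;
    uint32_t sampleRate;
    MiniHeader() : frameCount(0), sampleRate(0) {}
};

void allocateTrackBlockSketch()
{
    // Per-client heap, sized like AudioFlinger::Client's 1 MB MemoryDealer.
    sp<MemoryDealer> heap = new MemoryDealer(1024 * 1024, "sketch");

    const size_t frameCount = 4096, bytesPerFrame = 4;       // e.g. stereo 16-bit PCM
    const size_t size = sizeof(MiniHeader) + frameCount * bytesPerFrame;

    sp<IMemory> block = heap->allocate(size);                // like client->heap()->allocate(size)
    if (block == 0) return;

    MiniHeader* hdr = new (block->pointer()) MiniHeader();   // placement new, as in TrackBase
    hdr->frameCount = (uint32_t)frameCount;

    char* buffer = (char*)hdr + sizeof(MiniHeader);          // data area starts right after the header
    memset(buffer, 0, frameCount * bytesPerFrame);           // "clear all buffers"
}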

