Android Audio stream type


Source: http://blog.csdn.net/hgl868/article/details/6872126

While reading the AudioTrack code, we saw that creating an AudioTrack object requires specifying a StreamType.
In this post we pull out only the stream-type-related code and take a close look at everything connected to the stream type.
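
For orientation, here is a minimal, hypothetical sketch (not part of the original article) of how a native client hands a stream type to AudioTrack. It assumes the Froyo/Gingerbread-era C++ API whose set() signature is excerpted later in this post, and assumes the omitted set() parameters keep the default values declared in AudioTrack.h; the helper name playOnMusicStream is made up. It only illustrates that the client picks the stream type, which then travels down the stack exactly as traced below.

#include <media/AudioTrack.h>
#include <media/AudioSystem.h>

using namespace android;

// Hypothetical sketch: play PCM on the MUSIC stream. The stream type chosen
// here is what ends up selecting the output, the routing strategy and the
// per-stream volume slot in audio flinger, as this article shows.
static status_t playOnMusicStream()
{
    AudioTrack* track = new AudioTrack();
    status_t status = track->set(
            AudioSystem::MUSIC,                // stream type
            44100,                             // sample rate in Hz
            AudioSystem::PCM_16_BIT,           // format
            AudioSystem::CHANNEL_OUT_STEREO);  // channels
    if (status != NO_ERROR) {
        delete track;
        return status;
    }
    track->start();
    // ... feed PCM data with track->write(buffer, size) ...
    track->stop();
    delete track;
    return NO_ERROR;
}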

We will skip the Java-layer code and start from the function android_media_AudioTrack_native_setup.

static int
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint streamType, jint sampleRateInHertz, jint channels,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
...

// Both the frame count and the sampling rate are obtained based on the stream type.
// Internally, the stream type is used to look up an output and then that output's descriptor:
// if that succeeds, the frame count is read from the descriptor; otherwise it is queried from audio flinger for that output.
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        LOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
        return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        LOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
        return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }

    ...
    
 // The code below converts the Java-side stream type into the native-side stream type.
 // From here on, only the converted stream type is used.
    // check the stream type
    AudioSystem::stream_type atStreamType;
    if (streamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
        atStreamType = AudioSystem::VOICE_CALL;
    } else if (streamType == javaAudioTrackFields.STREAM_SYSTEM) {
        atStreamType = AudioSystem::SYSTEM;
    } else if (streamType == javaAudioTrackFields.STREAM_RING) {
        atStreamType = AudioSystem::RING;
    } else if (streamType == javaAudioTrackFields.STREAM_MUSIC) {
        atStreamType = AudioSystem::MUSIC;
    } else if (streamType == javaAudioTrackFields.STREAM_ALARM) {
        atStreamType = AudioSystem::ALARM;
    } else if (streamType == javaAudioTrackFields.STREAM_NOTIFICATION) {
        atStreamType = AudioSystem::NOTIFICATION;
    } else if (streamType == javaAudioTrackFields.STREAM_BLUETOOTH_SCO) {
        atStreamType = AudioSystem::BLUETOOTH_SCO;
    } else if (streamType == javaAudioTrackFields.STREAM_DTMF) {
        atStreamType = AudioSystem::DTMF;
    } else {
        LOGE("Error creating AudioTrack: unknown stream type.");
        return AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
    }
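    // (Note added for this walkthrough) javaAudioTrackFields.STREAM_* are the
    // Java-side stream constants, presumably cached when the JNI layer is
    // initialized, so this if/else chain simply translates the Java constant
    // into the native AudioSystem::stream_type enum used everywhere below.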

    ...
 
    // Save the stream type in the AudioTrackJniStorage object
    lpJniStorage->mStreamType = atStreamType;

    ...
    
 // Call the AudioTrack object's set() function
    // initialize the native AudioTrack object
    if (memoryMode == javaAudioTrackFields.MODE_STREAM) {

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channels,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{

    ...

 // Already discussed above
    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

 // If the stream type is DEFAULT, set it to MUSIC
    // handle default values first.
    if (streamType == AudioSystem::DEFAULT) {
        streamType = AudioSystem::MUSIC;
    }
 
    ...

 // Get the output
    audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
            sampleRate, format, channels, (AudioSystem::output_flags)flags);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0;
    mFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mSessionId = sessionId;
    mAuxEffectId = 0;

 // Create the IAudioTrack object
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioTrack::createTrack(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        bool enforceFrameCount)
{
    ...

 // The same queries show up again
    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    int afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    ...

 // Call into audio flinger to create the IAudioTrack object
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<IAudioTrack> AudioFlinger::createTrack(
        pid_t pid,
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        int output,
        int *sessionId,
        status_t *status)
{
    sp<PlaybackThread::Track> track;
    sp<TrackHandle> trackHandle;
    sp<Client> client;
    wp<Client> wclient;
    status_t lStatus;
    int lSessionId;

 // Parameter check
    if (streamType >= AudioSystem::NUM_STREAM_TYPES) {
        LOGE("invalid stream type");
        lStatus = BAD_VALUE;
        goto Exit;
    }

    ...

 // The stream type gets no further processing here; the playback thread's function is called
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::PlaybackThread::Track>  AudioFlinger::PlaybackThread::createTrack_l(
        const sp<AudioFlinger::Client>& client,
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        const sp<IMemory>& sharedBuffer,
        int sessionId,
        status_t *status)
{
    ...

 // Create the Track object
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
            const wp<ThreadBase>& thread,
            const sp<Client>& client,
            int streamType,
            uint32_t sampleRate,
            int format,
            int channelCount,
            int frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId)
    :   TrackBase(thread, client, sampleRate, format, channelCount, frameCount, 0, sharedBuffer, sessionId),
    mMute(false), mSharedBuffer(sharedBuffer), mName(-1), mMainBuffer(NULL), mAuxBuffer(NULL),
    mAuxEffectId(0), mHasVolumeController(false)
{
    ...
 
 // The stream type is not passed any further down; it is assigned to the member variable mStreamType

 // Now let's see where the member variable mStreamType is used
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioFlinger::PlaybackThread::Track::destroy()
{
    // NOTE: destroyTrack_l() can remove a strong reference to this Track
    // by removing it from mTracks vector, so there is a risk that this Tracks's
    // desctructor is called. As the destructor needs to lock mLock,
    // we must acquire a strong reference on this Track before locking mLock
    // here so that the destructor is called only when exiting this function.
    // On the other hand, as long as Track::destroy() is only called by
    // TrackHandle destructor, the TrackHandle still holds a strong ref on
    // this Track with its member mTrack.
    sp<Track> keep(this);
    { // scope for mLock
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            if (!isOutputTrack()) {
                if (mState == ACTIVE || mState == RESUMING) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioSystem::stopOutput(audio_io_handle_t output,
                                 AudioSystem::stream_type stream,
                                 int session)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return PERMISSION_DENIED;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
                                        AudioSystem::stream_type stream,
                                        int session)
{
    if (mpPolicyManager == NULL) {
        return NO_INIT;
    }
    LOGV("stopOutput() tid %d", gettid());
    Mutex::Autolock _l(mLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output,
                                            AudioSystem::stream_type stream,
                                            int session)
{
    LOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
    ssize_t index = mOutputs.indexOfKey(output);
    if (index < 0) {
        LOGW("stopOutput() unknow output %d", output);
        return BAD_VALUE;
    }

    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
 // Get the strategy from the stream type
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(
        AudioSystem::stream_type stream) {
    // stream to strategy mapping
    switch (stream) {
    case AudioSystem::VOICE_CALL:
    case AudioSystem::BLUETOOTH_SCO:
        return STRATEGY_PHONE;
    case AudioSystem::RING:
    case AudioSystem::NOTIFICATION:
    case AudioSystem::ALARM:
    case AudioSystem::ENFORCED_AUDIBLE:
        return STRATEGY_SONIFICATION;
    case AudioSystem::DTMF:
        return STRATEGY_DTMF;
    default:
        LOGE("unknown stream type");
    case AudioSystem::SYSTEM:
        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
        // while key clicks are played produces a poor result
    case AudioSystem::TTS:
    case AudioSystem::MUSIC:
        return STRATEGY_MEDIA;
    }
}
----------------------------------------------------------------
    routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);

    // handle special case for sonification while in call
    if (isInCall()) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::handleIncallSonification(int stream, bool starting, bool stateChange)
{
    // if the stream pertains to sonification strategy and we are in call we must
    // mute the stream if it is low visibility. If it is high visibility, we must play a tone
    // in the device used for phone strategy and play the tone if the selected device does not
    // interfere with the device used for phone strategy
    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
    // many times as there are active tracks on the output

    if (getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) {
        AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mHardwareOutput);
        LOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
                stream, starting, outputDesc->mDevice, stateChange);
        if (outputDesc->mRefCount[stream]) {
            int muteCount = 1;
            if (stateChange) {
                muteCount = outputDesc->mRefCount[stream];
            }
            if (AudioSystem::isLowVisibility((AudioSystem::stream_type)stream)) {
                LOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
                for (int i = 0; i < muteCount; i++) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::setStreamMute(int stream, bool on, audio_io_handle_t output, int delayMs)
{
    StreamDescriptor &streamDesc = mStreams[stream];
    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);

    LOGV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d", stream, on, output, outputDesc->mMuteCount[stream]);

    if (on) {
        if (outputDesc->mMuteCount[stream] == 0) {
            if (streamDesc.mCanBeMuted) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyManagerBase::checkAndSetVolume(int stream, int index, audio_io_handle_t output, uint32_t device, int delayMs, bool force)
{

    // do not change actual stream volume if the stream is muted
    if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
        LOGV("checkAndSetVolume() stream %d muted count %d", stream, mOutputs.valueFor(output)->mMuteCount[stream]);
        return NO_ERROR;
    }

    // do not change in call volume if bluetooth is connected and vice versa
    if ((stream == AudioSystem::VOICE_CALL && mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
        (stream == AudioSystem::BLUETOOTH_SCO && mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO)) {
        LOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
             stream, mForceUse[AudioSystem::FOR_COMMUNICATION]);
        return INVALID_OPERATION;
    }

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
float AudioPolicyManagerBase::computeVolume(int stream, int index, audio_io_handle_t output, uint32_t device)
{
    float volume = 1.0;
    AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
    StreamDescriptor &streamDesc = mStreams[stream];

    if (device == 0) {
        device = outputDesc->device();
    }

    int volInt = (100 * (index - streamDesc.mIndexMin)) / (streamDesc.mIndexMax - streamDesc.mIndexMin);
    volume = AudioSystem::linearToLog(volInt);
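    // (Worked example with hypothetical values) with mIndexMin = 0, mIndexMax = 15
    // and index = 7: volInt = (100 * 7) / 15 = 46, i.e. the stream's volume index
    // is first rescaled to a 0..100 range, and linearToLog() then maps it onto a
    // logarithmic gain curve.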

    // if a headset is connected, apply the following rules to ring tones and notifications
    // to avoid sound level bursts in user's ears:
    // - always attenuate ring tones and notifications volume by 6dB
    // - if music is playing, always limit the volume to current music volume,
    // with a minimum threshold at -36dB so that notification is always perceived.
    if ((device &
        (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
        AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
        AudioSystem::DEVICE_OUT_WIRED_HEADSET |
        AudioSystem::DEVICE_OUT_WIRED_HEADPHONE)) &&
        ((getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) ||
         (stream == AudioSystem::SYSTEM)) &&
        streamDesc.mCanBeMuted) {
        volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
        // when the phone is ringing we must consider that music could have been paused just before
        // by the music application and behave as if music was active if the last music track was
        // just stopped
        if (outputDesc->mRefCount[AudioSystem::MUSIC] || mLimitRingtoneVolume) {
            float musicVol = computeVolume(AudioSystem::MUSIC, mStreams[AudioSystem::MUSIC].mIndexCur, output, device);
            float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
            if (volume > minVol) {
                volume = minVol;
                LOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
            }
        }
    }

    return volume;
}
----------------------------------------------------------------
    float volume = computeVolume(stream, index, output, device);
    // We actually change the volume if:
    // - the float value returned by computeVolume() changed
    // - the force flag is set
    if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
            force) {
        mOutputs.valueFor(output)->mCurVolume[stream] = volume;
        LOGV("setStreamVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
        if (stream == AudioSystem::VOICE_CALL ||
            stream == AudioSystem::DTMF ||
            stream == AudioSystem::BLUETOOTH_SCO) {
            // offset value to reflect actual hardware volume that never reaches 0
            // 1% corresponds roughly to first step in VOICE_CALL stream volume setting (see AudioService.java)
            volume = 0.01 + 0.99 * volume;
        }
  
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream,
                                             float volume,
                                             audio_io_handle_t output,
                                             int delayMs)
{
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream,
                                                               float volume,
                                                               int output,
                                                               int delayMs)
{
    status_t status = NO_ERROR;

    AudioCommand *command = new AudioCommand();
    command->mCommand = SET_VOLUME;
    VolumeData *data = new VolumeData();
    data->mStream = stream;
    data->mVolume = volume;
    data->mIO = output;
    command->mParam = data;
    if (delayMs == 0) {
        command->mWaitStatus = true;
    } else {
        command->mWaitStatus = false;
    }
    Mutex::Autolock _l(mLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// insertCommand_l() must be called with mLock held
void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
{
    ssize_t i;
    Vector <AudioCommand *> removedCommands;

    command->mTime = systemTime() + milliseconds(delayMs);

    // acquire wake lock to make sure delayed commands are processed
    if (mName != "" && mAudioCommands.isEmpty()) {
        acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
    }

    // check same pending commands with later time stamps and eliminate them
    for (i = mAudioCommands.size()-1; i >= 0; i--) {
        AudioCommand *command2 = mAudioCommands[i];
        // commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands
        if (command2->mTime <= command->mTime) break;
        if (command2->mCommand != command->mCommand) continue;

        switch (command->mCommand) {
        case SET_PARAMETERS: {
            ParametersData *data = (ParametersData *)command->mParam;
            ParametersData *data2 = (ParametersData *)command2->mParam;
            if (data->mIO != data2->mIO) break;
            LOGV("Comparing parameter command %s to new command %s",
                    data2->mKeyValuePairs.string(), data->mKeyValuePairs.string());
            AudioParameter param = AudioParameter(data->mKeyValuePairs);
            AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
            for (size_t j = 0; j < param.size(); j++) {
               String8 key;
               String8 value;
               param.getAt(j, key, value);
               for (size_t k = 0; k < param2.size(); k++) {
                  String8 key2;
                  String8 value2;
                  param2.getAt(k, key2, value2);
                  if (key2 == key) {
                      param2.remove(key2);
                      LOGV("Filtering out parameter %s", key2.string());
                      break;
                  }
               }
            }
            // if all keys have been filtered out, remove the command.
            // otherwise, update the key value pairs
            if (param2.size() == 0) {
                removedCommands.add(command2);
            } else {
                data2->mKeyValuePairs = param2.toString();
            }
        } break;

        case SET_VOLUME: {
            VolumeData *data = (VolumeData *)command->mParam;
            VolumeData *data2 = (VolumeData *)command2->mParam;
            if (data->mIO != data2->mIO) break;
            if (data->mStream != data2->mStream) break;
            LOGV("Filtering out volume command on output %d for stream %d",
                    data->mIO, data->mStream);
            removedCommands.add(command2);
        } break;
        case START_TONE:
        case STOP_TONE:
        default:
            break;
        }
    }

    // remove filtered commands
    for (size_t j = 0; j < removedCommands.size(); j++) {
        // removed commands always have time stamps greater than current command
        for (size_t k = i + 1; k < mAudioCommands.size(); k++) {
            if (mAudioCommands[k] == removedCommands[j]) {
                LOGV("suppressing command: %d", mAudioCommands[k]->mCommand);
                mAudioCommands.removeAt(k);
                break;
            }
        }
    }
    removedCommands.clear();

    // insert command at the right place according to its time stamp
    LOGV("inserting command: %d at index %d, num commands %d",
            command->mCommand, (int)i+1, mAudioCommands.size());
 // The command is saved in the member variable mAudioCommands.
 // The commands in mAudioCommands are processed in AudioPolicyService::AudioCommandThread::threadLoop.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool AudioPolicyService::AudioCommandThread::threadLoop()
{
    nsecs_t waitTime = INT64_MAX;

    mLock.lock();
    while (!exitPending())
    {
        while(!mAudioCommands.isEmpty()) {
            nsecs_t curTime = systemTime();
            // commands are sorted by increasing time stamp: execute them from index 0 and up
            if (mAudioCommands[0]->mTime <= curTime) {
                AudioCommand *command = mAudioCommands[0];
                mAudioCommands.removeAt(0);
                mLastCommand = *command;

                switch (command->mCommand) {
                case START_TONE: {
                    mLock.unlock();
                    ToneData *data = (ToneData *)command->mParam;
                    LOGV("AudioCommandThread() processing start tone %d on stream %d",
                            data->mType, data->mStream);
                    if (mpToneGenerator != NULL)
                        delete mpToneGenerator;
                    mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
                    mpToneGenerator->startTone(data->mType);
                    delete data;
                    mLock.lock();
                    }break;
                case STOP_TONE: {
                    mLock.unlock();
                    LOGV("AudioCommandThread() processing stop tone");
                    if (mpToneGenerator != NULL) {
                        mpToneGenerator->stopTone();
                        delete mpToneGenerator;
                        mpToneGenerator = NULL;
                    }
                    mLock.lock();
                    }break;
                case SET_VOLUME: {
                    VolumeData *data = (VolumeData *)command->mParam;
                    LOGV("AudioCommandThread() processing set volume stream %d, \
                            volume %f, output %d", data->mStream, data->mVolume, data->mIO);
     // This calls into AudioSystem
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioSystem::setStreamVolume(int stream, float value, int output)
{
    if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) return PERMISSION_DENIED;
 // Which in turn calls into audio flinger
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioFlinger::setStreamVolume(int stream, float value, int output)
{
    // check calling permissions
    if (!settingsAllowed()) {
        return PERMISSION_DENIED;
    }

    if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    PlaybackThread *thread = NULL;
    if (output) {
        thread = checkPlaybackThread_l(output);
        if (thread == NULL) {
            return BAD_VALUE;
        }
    }

 // Per-stream volumes kept in audio flinger
    mStreamTypes[stream].volume = value;

    if (thread == NULL) {
        for (uint32_t i = 0; i < mPlaybackThreads.size(); i++) {
           mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
        }
    } else {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::setStreamVolume(int stream, float value)
{
#ifdef LVMX
    int audioOutputType = LifeVibes::getMixerType(mId, mType);
    if (LifeVibes::audioOutputTypeIsLifeVibes(audioOutputType)) {
        LifeVibes::setStreamVolume(audioOutputType, stream, value);
    }
#endif
// Per-stream volumes kept in the playback thread
    mStreamTypes[stream].volume = value;
    return NO_ERROR;
}
----------------------------------------------------------------
        thread->setStreamVolume(stream, value);
    }

    return NO_ERROR;
}
----------------------------------------------------------------
    af->setStreamVolume(stream, value, output);
    return NO_ERROR;
}
----------------------------------------------------------------
                    command->mStatus = AudioSystem::setStreamVolume(data->mStream,
                                                                    data->mVolume,
                                                                    data->mIO);
                    if (command->mWaitStatus) {
                        command->mCond.signal();
                        mWaitWorkCV.wait(mLock);
                    }
                    delete data;
                    }break;
                case SET_PARAMETERS: {
                     ParametersData *data = (ParametersData *)command->mParam;
                     LOGV("AudioCommandThread() processing set parameters string %s, io %d",
                             data->mKeyValuePairs.string(), data->mIO);
                     command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
                     if (command->mWaitStatus) {
                         command->mCond.signal();
                         mWaitWorkCV.wait(mLock);
                     }
                     delete data;
                     }break;
                case SET_VOICE_VOLUME: {
                    VoiceVolumeData *data = (VoiceVolumeData *)command->mParam;
                    LOGV("AudioCommandThread() processing set voice volume volume %f",
                            data->mVolume);
                    command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
                    if (command->mWaitStatus) {
                        command->mCond.signal();
                        mWaitWorkCV.wait(mLock);
                    }
                    delete data;
                    }break;
                default:
                    LOGW("AudioCommandThread() unknown command %d", command->mCommand);
                }
                delete command;
                waitTime = INT64_MAX;
            } else {
                waitTime = mAudioCommands[0]->mTime - curTime;
                break;
            }
        }
        // release delayed commands wake lock
        if (mName != "" && mAudioCommands.isEmpty()) {
            release_wake_lock(mName.string());
        }
        LOGV("AudioCommandThread() going to sleep");
        mWaitWorkCV.waitRelative(mLock, waitTime);
        LOGV("AudioCommandThread() waking up");
    }
    mLock.unlock();
    return false;
}
----------------------------------------------------------------
    mAudioCommands.insertAt(command, i + 1);
}
----------------------------------------------------------------
    insertCommand_l(command, delayMs);
    LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d",
            stream, volume, output);
    mWaitWorkCV.signal();
    if (command->mWaitStatus) {
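        // (Note added for this walkthrough) mWaitStatus is true only for
        // zero-delay commands: block until threadLoop() has executed the command
        // and signalled mCond, read back the status it posted, then signal
        // mWaitWorkCV so threadLoop() (which waits after signalling) can resume.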
        command->mCond.wait(mLock);
        status =  command->mStatus;
        mWaitWorkCV.signal();
    }
    return status;
}
----------------------------------------------------------------
    return mAudioCommandThread->volumeCommand((int)stream, volume, (int)output, delayMs);
}
----------------------------------------------------------------
        mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, volume, output, delayMs);
    }

    if (stream == AudioSystem::VOICE_CALL ||
        stream == AudioSystem::BLUETOOTH_SCO) {
        float voiceVolume;
        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
        if (stream == AudioSystem::VOICE_CALL) {
            voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
        } else {
            voiceVolume = 1.0;
        }
        if (voiceVolume != mLastVoiceVolume && output == mHardwareOutput) {
            mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
            mLastVoiceVolume = voiceVolume;
        }
    }

    return NO_ERROR;
}
----------------------------------------------------------------
                checkAndSetVolume(stream, 0, output, outputDesc->device(), delayMs);
            }
        }
        // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
        outputDesc->mMuteCount[stream]++;
    } else {
        if (outputDesc->mMuteCount[stream] == 0) {
            LOGW("setStreamMute() unmuting non muted stream!");
            return;
        }
        if (--outputDesc->mMuteCount[stream] == 0) {
            checkAndSetVolume(stream, streamDesc.mIndexCur, output, outputDesc->device(), delayMs);
        }
    }
}
----------------------------------------------------------------
                    setStreamMute(stream, starting, mHardwareOutput);
                }
            } else {
                LOGV("handleIncallSonification() high visibility");
                if (outputDesc->device() & getDeviceForStrategy(STRATEGY_PHONE)) {
                    LOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
                    for (int i = 0; i < muteCount; i++) {
                        setStreamMute(stream, starting, mHardwareOutput);
                    }
                }
                if (starting) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone,
                                       AudioSystem::stream_type stream)
{
// This should work much like the set-volume command we just looked at.
// The command is handled in the threadLoop function; the handling code is as follows:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
                case START_TONE: {
                    mLock.unlock();
                    ToneData *data = (ToneData *)command->mParam;
                    LOGV("AudioCommandThread() processing start tone %d on stream %d",
                            data->mType, data->mStream);
                    if (mpToneGenerator != NULL)
                        delete mpToneGenerator;
                    mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
                    mpToneGenerator->startTone(data->mType);
                    delete data;
                    mLock.lock();
                    }break;
----------------------------------------------------------------
    mTonePlaybackThread->startToneCommand(tone, stream);
    return NO_ERROR;
}
----------------------------------------------------------------
                    mpClientInterface->startTone(ToneGenerator::TONE_SUP_CALL_WAITING, AudioSystem::VOICE_CALL);
                } else {
                    mpClientInterface->stopTone();
                }
            }
        }
    }
}
----------------------------------------------------------------
        handleIncallSonification(stream, false, false);
    }

    if (outputDesc->mRefCount[stream] > 0) {
        // decrement usage count of this stream on the output
        outputDesc->changeRefCount(stream, -1);
        // store time at which the last music track was stopped - see computeVolume()
        if (stream == AudioSystem::MUSIC) {
            mMusicStopTime = systemTime();
        }

        setOutputDevice(output, getNewDevice(output));

#ifdef WITH_A2DP
        if (mA2dpOutput != 0 && !a2dpUsedForSonification() &&
                strategy == STRATEGY_SONIFICATION) {
            setStrategyMute(STRATEGY_MEDIA,
                            false,
                            mA2dpOutput,
                            mOutputs.valueFor(mHardwareOutput)->mLatency*2);
        }
#endif
        if (output != mHardwareOutput) {
            setOutputDevice(mHardwareOutput, getNewDevice(mHardwareOutput), true);
        }
        return NO_ERROR;
    } else {
        LOGW("stopOutput() refcount is already 0 for output %d", output);
        return INVALID_OPERATION;
    }
}
----------------------------------------------------------------
    return mpPolicyManager->stopOutput(output, stream, session);
}
----------------------------------------------------------------
    return aps->stopOutput(output, stream, session);
}
----------------------------------------------------------------
                    AudioSystem::stopOutput(thread->id(),
                                            (AudioSystem::stream_type)mStreamType,
                                            mSessionId);
                }
                AudioSystem::releaseOutput(thread->id());
            }
            Mutex::Autolock _l(thread->mLock);
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            playbackThread->destroyTrack_l(this);
        }
    }
}
----------------------------------------------------------------

// Now let's look at the next place where the member variable mStreamType is used
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::Track::start()
{
    status_t status = NO_ERROR;
    LOGV("start(%d), calling thread %d session %d",
            mName, IPCThreadState::self()->getCallingPid(), mSessionId);
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        int state = mState;
        // here the track could be either new, or restarted
        // in both cases "unstop" the track
        if (mState == PAUSED) {
            mState = TrackBase::RESUMING;
            LOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
        } else {
            mState = TrackBase::ACTIVE;
            LOGV("? => ACTIVE (%d) on thread %p", mName, this);
        }

        if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
            thread->mLock.unlock();
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioSystem::startOutput(audio_io_handle_t output,
                                  AudioSystem::stream_type stream,
                                  int session)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return PERMISSION_DENIED;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
                                         AudioSystem::stream_type stream,
                                         int session)
{
    if (mpPolicyManager == NULL) {
        return NO_INIT;
    }
    LOGV("startOutput() tid %d", gettid());
    Mutex::Autolock _l(mLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Similar to AudioPolicyManagerBase::stopOutput and the functions it calls, which we just looked at
status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
                                             AudioSystem::stream_type stream,
                                             int session)
{
    LOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
    ssize_t index = mOutputs.indexOfKey(output);
    if (index < 0) {
        LOGW("startOutput() unknow output %d", output);
        return BAD_VALUE;
    }

    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
    routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);

#ifdef WITH_A2DP
    if (mA2dpOutput != 0  && !a2dpUsedForSonification() && strategy == STRATEGY_SONIFICATION) {
        setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
    }
#endif

    // incremenent usage count for this stream on the requested output:
    // NOTE that the usage count is the same for duplicated output and hardware output which is
    // necassary for a correct control of hardware output routing by startOutput() and stopOutput()
    outputDesc->changeRefCount(stream, 1);

    setOutputDevice(output, getNewDevice(output));

    // handle special case for sonification while in call
    if (isInCall()) {
        handleIncallSonification(stream, true, false);
    }

    // apply volume rules for current stream and device if necessary
    checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, outputDesc->device());

    return NO_ERROR;
}
----------------------------------------------------------------
    return mpPolicyManager->startOutput(output, stream, session);
}
----------------------------------------------------------------
    return aps->startOutput(output, stream, session);
}
----------------------------------------------------------------
            status = AudioSystem::startOutput(thread->id(),
                                              (AudioSystem::stream_type)mStreamType,
                                              mSessionId);
            thread->mLock.lock();
        }
        if (status == NO_ERROR) {
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            playbackThread->addTrack_l(this);
        } else {
            mState = state;
        }
    } else {
        status = BAD_VALUE;
    }
    return status;
}
----------------------------------------------------------------

// Now let's look at the next place where the member variable mStreamType is used
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioFlinger::PlaybackThread::Track::stop()
{
    LOGV("stop(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        int state = mState;
        if (mState > STOPPED) {
            mState = STOPPED;
            // If the track is not active (PAUSED and buffers full), flush buffers
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
            }
            LOGV("(> STOPPED) => STOPPED (%d) on thread %p", mName, playbackThread);
        }
        if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
            thread->mLock.unlock();
   // This also calls AudioSystem::stopOutput, which we have already seen
            AudioSystem::stopOutput(thread->id(),
                                    (AudioSystem::stream_type)mStreamType,
                                    mSessionId);
            thread->mLock.lock();
        }
    }
}
----------------------------------------------------------------

// Now let's look at the next place where the member variable mStreamType is used
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioFlinger::PlaybackThread::Track::pause()
{
    LOGV("pause(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        if (mState == ACTIVE || mState == RESUMING) {
            mState = PAUSING;
            LOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
            if (!isOutputTrack()) {
                thread->mLock.unlock();
    // This also calls AudioSystem::stopOutput, which we have already seen
                AudioSystem::stopOutput(thread->id(),
                                        (AudioSystem::stream_type)mStreamType,
                                        mSessionId);
                thread->mLock.lock();
            }
        }
    }
}
----------------------------------------------------------------

        mStreamType = streamType;
    
 ...
 
}
----------------------------------------------------------------
        track = new Track(this, client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, sessionId);
    
    ...
 
    return track;
}
----------------------------------------------------------------
        track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, lSessionId, &lStatus);

    ...
 
    return trackHandle;
}
----------------------------------------------------------------
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);

    if (track == 0) {
        LOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
 
    ...
 
    return NO_ERROR;
}
----------------------------------------------------------------
    // create the IAudioTrack
    status_t status = createTrack(streamType, sampleRate, format, channelCount,
                                  frameCount, flags, sharedBuffer, output, true);

    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        if (mAudioTrackThread == 0) {
          LOGE("Could not create callback thread");
          return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

 // Save the stream type in a member variable.
 // The following functions use this member variable:
 // AudioTrack::streamType - returns the stream type
 // AudioTrack::start - if the status is DEAD_OBJECT, createTrack() is called to re-create the IAudioTrack object
 // AudioTrack::setSampleRate - gets the output sampling rate based on the stream type
 // AudioTrack::getOutput - gets the output based on the stream type
 // AudioTrack::obtainBuffer - if the status is DEAD_OBJECT, createTrack() is called to re-create the IAudioTrack object
    mStreamType = streamType;
    mFormat = format;
    mChannels = channels;
    mChannelCount = channelCount;
    mSharedBuffer = sharedBuffer;
    mMuted = false;
    mActive = 0;
    mCbf = cbf;
    mUserData = user;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlags = flags;

    return NO_ERROR;
}
----------------------------------------------------------------
        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            channels,
            frameCount,
            0,// flags
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            0,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
            
    } else if (memoryMode == javaAudioTrackFields.MODE_STATIC) {
        // AudioTrack is using shared memory
        
        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            LOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }
        
        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            channels,
            frameCount,
            0,// flags
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            lpJniStorage->mMemBase,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
    }

    ...
    
}

&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&Summary&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
The stream type is what the native audio layer uses to manage streams.
Further down, in the HAL layer, there is no notion of a stream type;
for example, the parameters of AudioHardwareALSA::openOutputStream carry no stream type information:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    // set/get global audio parameters
    //virtual status_t    setParameters(const String8& keyValuePairs);
    //virtual String8     getParameters(const String8& keys);

    // Returns audio input buffer size according to parameters passed or 0 if one of the
    // parameters is not supported
    //virtual size_t    getInputBufferSize(uint32_t sampleRate, int format, int channels);

    /** This method creates and opens the audio hardware output stream */
    virtual AudioStreamOut* openOutputStream(
            uint32_t devices,
            int *format=0,
            uint32_t *channels=0,
            uint32_t *sampleRate=0,
            status_t *status=0);
----------------------------------------------------------------

Audio flinger and the playback thread each keep an array of per-stream-type information (volume and mute).
Audio policy maps each stream type to a routing strategy (getStrategy()) and keeps per-stream volume state in mStreams.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The array of stream-type information defined in audio flinger:
PlaybackThread::stream_type_t       mStreamTypes[AudioSystem::NUM_STREAM_TYPES];
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        struct  stream_type_t {
            stream_type_t()
                :   volume(1.0f),
                    mute(false)
            {
            }
            float       volume;
            bool        mute;
        };
----------------------------------------------------------------
----------------------------------------------------------------
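To make the role of these per-stream arrays concrete, here is a rough, hypothetical sketch (not taken from AOSP) of how a playback thread can fold the stream-type volume into each track's gain while mixing. The struct and helper names (StreamTypeInfo, gStreamTypes, trackGain) are made up for illustration; the real mixer code is more involved, but the idea is the same: the stream type chosen when the AudioTrack was created selects which volume/mute slot is applied to its samples.

// Hypothetical sketch only: per-stream volume applied at mix time.
struct StreamTypeInfo {
    float volume;   // updated by AudioFlinger::setStreamVolume()
    bool  mute;     // updated by AudioFlinger::setStreamMute()
};

// One slot per stream type (NUM_STREAM_TYPES assumed to be 10 in this era).
static StreamTypeInfo gStreamTypes[10];

// Gain applied to one track while mixing: master volume, times the volume of
// the track's stream type (0 if that stream type is muted), times the track's
// own volume.
static float trackGain(int streamType, float masterVolume, float trackVolume)
{
    const StreamTypeInfo& st = gStreamTypes[streamType];
    if (st.mute) {
        return 0.0f;
    }
    return masterVolume * st.volume * trackVolume;
}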
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&