AudioTrack.cpp Source Code Analysis
AudioTrack.java performs the Java-side handling for audio playback. Its JNI middle layer is android_media_AudioTrack.cpp, which in turn calls down into the native-layer AudioTrack.cpp.
This article analyzes the source of android_media_AudioTrack.cpp by following the correspondence between these layers.
A typical example of playing audio with AudioTrack looks like this:
// 1. Determine the buffer size to allocate based on the audio properties
int bufSize = AudioTrack.getMinBufferSize(
        8000,                                     // sample rate
        AudioFormat.CHANNEL_CONFIGURATION_STEREO, // channel configuration
        AudioFormat.ENCODING_PCM_16BIT);          // sample precision: 16 bits (2 bytes) per sample
// 2. Create the AudioTrack
AudioTrack track = new AudioTrack(
        AudioManager.STREAM_MUSIC,                // audio stream type
        8000,
        AudioFormat.CHANNEL_CONFIGURATION_STEREO,
        AudioFormat.ENCODING_PCM_16BIT,
        bufSize,
        AudioTrack.MODE_STREAM);                  // data loading mode: streaming (not static)
// 3. Start playback
track.play();
......
// 4. Write the playback data
track.write(audiodata, 0, length);
......
// 5. Stop playback and release resources
track.stop();     // stop playing
track.release();  // release the underlying resources
Part 1: Obtaining the minimum buffer size
In AudioTrack.java:
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    switch(channelConfig) {
    case AudioFormat.CHANNEL_OUT_MONO:
    case AudioFormat.CHANNEL_CONFIGURATION_MONO:    // mono
        channelCount = 1;
        break;
    case AudioFormat.CHANNEL_OUT_STEREO:
    case AudioFormat.CHANNEL_CONFIGURATION_STEREO:  // stereo
        channelCount = 2;
        break;
    default:
        ......
    }
    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if (size <= 0) {
        loge("getMinBufferSize(): error querying hardware");
        return ERROR;
    } else {
        return size;
    }
}
Here channelCount corresponds to the number of audio channels.
In android_media_AudioTrack.cpp:
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env,  jobject thiz,
    jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    size_t frameCount;
    const status_t status = AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
            sampleRateInHertz);
    if (status != NO_ERROR) {
        ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
                sampleRateInHertz, status);
        return -1;
    }
    const audio_format_t format = audioFormatToNative(audioFormat);
    if (audio_has_proportional_frames(format)) {
        const size_t bytesPerSample = audio_bytes_per_sample(format);
        return frameCount * channelCount * bytesPerSample;
    } else {
        return frameCount;
    }
}
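To make the byte arithmetic at the end concrete, here is the same calculation with assumed numbers; the real frameCount comes from AudioTrack::getMinFrameCount() and varies per device and output.

// Worked example of the calculation above; the values are assumptions, not device data.
#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t frameCount     = 1024;  // assumed result of getMinFrameCount()
    const std::size_t channelCount   = 2;     // stereo
    const std::size_t bytesPerSample = 2;     // AUDIO_FORMAT_PCM_16_BIT
    // one frame holds one sample per channel, so:
    std::printf("min buffer = %zu bytes\n", frameCount * channelCount * bytesPerSample); // 4096
    return 0;
}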
Part 2: Constructing the AudioTrack object
In AudioTrack.java:
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode)
throws IllegalArgumentException {
    this(streamType, sampleRateInHz, channelConfig, audioFormat,
            bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
}
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode, int sessionId)
throws IllegalArgumentException {
    // mState already == STATE_UNINITIALIZED
    this((new AudioAttributes.Builder())
                .setLegacyStreamType(streamType)
                .build(),
            (new AudioFormat.Builder())
                .setChannelMask(channelConfig)
                .setEncoding(audioFormat)
                .setSampleRate(sampleRateInHz)
                .build(),
            bufferSizeInBytes,
            mode, sessionId);
}
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId)
throws IllegalArgumentException {
    super(attributes);
    // mState already == STATE_UNINITIALIZED

    int channelIndexMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
        channelIndexMask = format.getChannelIndexMask();
    }
    int channelMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
        channelMask = format.getChannelMask();
    } else if (channelIndexMask == 0) { // if no masks at all, use stereo
        channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
    }
    int encoding = AudioFormat.ENCODING_DEFAULT;
    if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
        encoding = format.getEncoding();
    }
    audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
    mStreamType = AudioSystem.STREAM_DEFAULT;

    audioBuffSizeCheck(bufferSizeInBytes);

    mInitializationLooper = looper;

    if (sessionId < 0) {
        throw new IllegalArgumentException("Invalid audio session ID: " + sessionId);
    }

    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;
    // initialize the AudioTrack at the JNI layer
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
    if (initResult != SUCCESS) {
        loge("Error code " + initResult + " when initializing AudioTrack.");
        return; // with mState == STATE_UNINITIALIZED
    }

    mSampleRate = sampleRate[0];
    mSessionId = session[0];

    if (mDataLoadMode == MODE_STATIC) {
        mState = STATE_NO_STATIC_DATA;
    } else {
        mState = STATE_INITIALIZED;
    }
}
Let's focus on the initialization at the JNI layer:
int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
First, the JNI registration and field-ID caching:
// ----------------------------------------------------------------------------
int register_android_media_AudioTrack(JNIEnv *env)
{
    // must be first
    int res = RegisterMethodsOrDie(env, kClassPathName, gMethods, NELEM(gMethods));

    javaAudioTrackFields.nativeTrackInJavaObj = NULL;
    javaAudioTrackFields.postNativeEventInJava = NULL;

    // Get the AudioTrack class
    // audioTrackClass corresponds to the Java AudioTrack class
    jclass audioTrackClass = FindClassOrDie(env, kClassPathName);

    // Get the postEvent method
    // postNativeEventInJava corresponds to AudioTrack.java's postEventFromNative method
    javaAudioTrackFields.postNativeEventInJava = GetStaticMethodIDOrDie(env,
            audioTrackClass, JAVA_POSTEVENT_CALLBACK_NAME,
            "(Ljava/lang/Object;IIILjava/lang/Object;)V");

    // Get the variables fields
    //      nativeTrackInJavaObj
    // nativeTrackInJavaObj corresponds to AudioTrack.java's field mNativeTrackInJavaObj
    javaAudioTrackFields.nativeTrackInJavaObj = GetFieldIDOrDie(env,
            audioTrackClass, JAVA_NATIVETRACKINJAVAOBJ_FIELD_NAME, "J");
    //      jniData
    // jniData corresponds to AudioTrack.java's field mJniData
    javaAudioTrackFields.jniData = GetFieldIDOrDie(env,
            audioTrackClass, JAVA_JNIDATA_FIELD_NAME, "J");
    //      fieldStreamType
    // fieldStreamType corresponds to AudioTrack.java's field mStreamType
    javaAudioTrackFields.fieldStreamType = GetFieldIDOrDie(env,
            audioTrackClass, JAVA_STREAMTYPE_FIELD_NAME, "I");

    env->DeleteLocalRef(audioTrackClass);

    // Get the AudioAttributes class and fields
    // audioAttrClass corresponds to the Java AudioAttributes class
    jclass audioAttrClass = FindClassOrDie(env, kAudioAttributesClassPathName);
    // fieldUsage corresponds to AudioAttributes.java's field mUsage
    // (GetFieldIDOrDie resolves the field ID on class audioAttrClass)
    javaAudioAttrFields.fieldUsage = GetFieldIDOrDie(env, audioAttrClass, "mUsage", "I");
    // fieldContentType corresponds to AudioAttributes.java's field mContentType
    javaAudioAttrFields.fieldContentType = GetFieldIDOrDie(env,
            audioAttrClass, "mContentType", "I");
    // fieldFlags corresponds to AudioAttributes.java's field mFlags
    javaAudioAttrFields.fieldFlags = GetFieldIDOrDie(env, audioAttrClass, "mFlags", "I");
    // fieldFormattedTags corresponds to AudioAttributes.java's field mFormattedTags
    javaAudioAttrFields.fieldFormattedTags = GetFieldIDOrDie(env,
            audioAttrClass, "mFormattedTags", "Ljava/lang/String;");

    env->DeleteLocalRef(audioAttrClass);

    // initialize PlaybackParams field info
    gPlaybackParamsFields.init(env);

    return res;
}
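For reference, gMethods is the usual JNINativeMethod table that RegisterMethodsOrDie installs, mapping Java native methods to the C++ functions in this file. One illustrative entry (the real table is much longer; treat this as a sketch):

// Illustrative JNINativeMethod entry; the signature string "(III)I" means the
// Java method takes (int, int, int) and returns int.
static const JNINativeMethod gMethodsSketch[] = {
    {"native_get_min_buff_size", "(III)I", (void *)android_media_AudioTrack_get_min_buff_size},
};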
Then the native AudioTrack setup itself:
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack) {

    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
        "nativeAudioTrack=0x%llX",
        jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
        nativeAudioTrack);

    sp<AudioTrack> lpTrack = 0;

    // get a pointer to the contents of the jSession array
    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    // read the first element of the array
    audio_session_t sessionId = (audio_session_t) nSession[0];
    // release the pointer to the array
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    AudioTrackJniStorage* lpJniStorage = NULL;

    audio_attributes_t *paa = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    // first-time initialization
    if (nativeAudioTrack == 0) {
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }
        // read the sample rate
        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        // Invalid channel representations are caught by !audio_is_output_channel() below.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.",
                    nativeChannelMask);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }

        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

        // check the format.
        // This function was called from Java, so we compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }

        // compute the frame count
        size_t frameCount;
        if (audio_is_linear_pcm(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }

        // 1. create the AudioTrack
        lpTrack = new AudioTrack();

        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
        /* 2.
         * jaa corresponds to the Java-layer AudioAttributes;
         * copy the AudioAttributes properties into the audio_attributes_t
         */
        const jstring jtags =
                (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
        const char* tags = env->GetStringUTFChars(jtags, NULL);
        // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
        strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
        env->ReleaseStringUTFChars(jtags, tags);
        paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
        paa->content_type =
                (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
        paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);

        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                paa->usage, paa->content_type, paa->flags, paa->tags);

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;

        // initialize the native AudioTrack object
        // 3. configure the AudioTrack according to memoryMode
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:
            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory
            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }

        if (status != NO_ERROR) {
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
    } else {  // end if (nativeAudioTrack == 0)
        lpTrack = (AudioTrack*)nativeAudioTrack;
        // TODO: We need to find out which members of the Java AudioTrack might
        ......
Several points in this function deserve explanation.
1. Creating the AudioTrack
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
This initializes the AudioTrack's status and state, sets the paused position to 0, and fills in the audio_attributes_t member, which corresponds to the Java-layer AudioAttributes:
typedef struct {
    audio_content_type_t content_type;
    audio_usage_t        usage;
    audio_source_t       source;
    audio_flags_mask_t   flags;
    char                 tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE]; /* UTF8 */
} audio_attributes_t;
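As an illustration, such a structure would be populated for a music stream roughly like this (values assumed for the example, not taken from this file):

// Hypothetical example: audio_attributes_t for a music stream.
audio_attributes_t attr = {};                  // zero-initialize all fields
attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;  // what the content is
attr.usage        = AUDIO_USAGE_MEDIA;         // why it is being played
attr.flags        = AUDIO_FLAG_NONE;
attr.tags[0]      = '\0';                      // no tags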
2. Mapping the Java-layer AudioAttributes into the native layer. audio_attributes_fields_t caches the JNI field IDs of the AudioAttributes members (they are resolved once, in register_android_media_AudioTrack):
struct audio_attributes_fields_t {
    jfieldID fieldUsage;        // AudioAttributes.mUsage
    jfieldID fieldContentType;  // AudioAttributes.mContentType
    jfieldID fieldFlags;        // AudioAttributes.mFlags
    jfieldID fieldFormattedTags;// AudioAttributes.mFormattedTags
};
static audio_attributes_fields_t javaAudioAttrFields;
AudioTrackJniStorage implements a shared-memory mechanism: during static playback (MODE_STATIC) the sample data is kept in shared memory allocated here; streaming playback does not use it. Its allocSharedMem helper is sketched below.
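allocSharedMem, which the MODE_STATIC branch of the setup code calls, is essentially a thin wrapper around an ashmem heap. A simplified sketch of its shape (details may differ between Android versions):

// Simplified sketch of AudioTrackJniStorage::allocSharedMem().
bool allocSharedMem(int sizeInBytes) {
    // an ashmem-backed heap that both processes can map
    mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
    if (mMemHeap->getHeapID() < 0) {
        return false;  // ashmem allocation failed
    }
    // the IMemory view of that heap, later passed to AudioTrack::set() as sharedBuffer
    mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
    return true;
}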
3. Configuring the AudioTrack

The function that configures the AudioTrack is declared as:
status_t set(audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount = 0,
        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
        callback_t cbf = NULL,
        void* user = NULL,
        int32_t notificationFrames = 0,
        const sp<IMemory>& sharedBuffer = 0,
        bool threadCanCallJava = false,
        audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
        transfer_type transferType = TRANSFER_DEFAULT,
        const audio_offload_info_t *offloadInfo = NULL,
        int uid = -1,
        pid_t pid = -1,
        const audio_attributes_t* pAttributes = NULL,
        bool doNotReconnect = false,
        float maxRequiredSpeed = 1.0f);
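Among these parameters, transferType decides how audio data reaches the track. The values that appear in this article, abridged from the transfer_type enum in AudioTrack.h:

// How data is transferred to the AudioTrack (abridged from AudioTrack.h):
enum transfer_type {
    TRANSFER_DEFAULT,   // not specified explicitly; inferred from the other parameters
    TRANSFER_CALLBACK,  // the callback delivers data via EVENT_MORE_DATA
    TRANSFER_OBTAIN,    // client calls obtainBuffer()/releaseBuffer() itself
    TRANSFER_SYNC,      // client calls write(), the MODE_STREAM path above
    TRANSFER_SHARED,    // shared memory filled once up front, the MODE_STATIC path
};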
The important work in AudioTrack::set() is the call to createTrack_l(), which obtains the sp<IAudioTrack> mAudioTrack pointer and then creates the mProxy object that manages the shared memory (AudioTrack writes data into it, AudioFlinger reads data out of it).
// must be called with mLock held
status_t AudioTrack::createTrack_l()
{
    /*
     * Get the Binder proxy of AudioFlinger (BpAudioFlinger).
     */
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    /*
     * audio_io_handle_t is an int defined via typedef. Where this value comes from is
     * fairly involved and concerns both AudioFlinger and AudioPolicyService. It is
     * mainly used by AudioFlinger as the index of one of its internal worker threads:
     * AudioFlinger creates several worker threads as needed, and
     * AudioSystem::getOutputForAttr() below picks a suitable one based on the stream
     * type and other parameters, returning its index inside AudioFlinger.
     */
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
......
    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match
        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                "track %u Hz, output %u Hz",
                mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {
        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks the frame count calculations and checks are mostly done by server,
        // but we try to respect the application's request for notifications per buffer.
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            if (mNotificationsPerBufferReq > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                            mNotificationsPerBufferReq, afFrameCountHAL);
                } else {
                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
                }
            }
        } else {
            // for normal tracks precompute the frame count based on speed.
            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
            minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    speed /*, 0 mNotificationsPerBufferReq*/);
        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    audio_session_t originalSessionId = mSessionId;
    /* 1.
     * Send the createTrack request to AudioFlinger. It returns an IAudioTrack
     * (actual type: BpAudioTrack); all later interaction between AudioTrack and
     * AudioFlinger goes through this track. Details will follow when we discuss
     * AudioFlinger.cpp.
     */
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    // obtain the shared memory on the AudioTrack side
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // obtain the base address of the shared memory
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    // clean up the previous mAudioTrack
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    /*
     * Cast the void* to audio_track_cblk_t: the head of this memory block
     * holds an audio_track_cblk_t object.
     */
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        size_t maxNotificationFrames;
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = afFrameCountHAL;
        } else {
            // For normal tracks, use at least double-buffering if no sample rate conversion,
            // or at least triple-buffering if there is sample rate conversion
            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
            maxNotificationFrames = frameCount / nBuffering;
        }
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
            if (mNotificationFramesAct == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                    maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
                    mNotificationFramesAct, maxNotificationFrames, frameCount);
            }
            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
    // FIXME don't believe this lie
    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    // 2. Create the client side of the shared memory. For streaming playback
    // (MODE_STREAM), AudioTrackClientProxy manages the shared memory:
    // AudioTrack writes data into it, AudioFlinger takes the data out to play it.
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        // MODE_STATIC uses StaticAudioTrackClientProxy
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output, streamType, mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
One parameter deserves a closer look: callback_t. In AudioTrack.cpp, callback_t is a function-pointer type:
typedef void (*callback_t)(int event, void* user, void *info);
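Any function matching this typedef can be handed to set(). For instance, a purely native client running in TRANSFER_CALLBACK mode might supply something like the following (a hypothetical sketch, not code from this file):

// Hypothetical native callback: fills PCM on EVENT_MORE_DATA (TRANSFER_CALLBACK mode).
static void myNativeCallback(int event, void* user, void* info) {
    if (event == AudioTrack::EVENT_MORE_DATA) {
        AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
        // produce up to buffer->size bytes into buffer->raw, then set buffer->size
        // to the number of bytes actually written
        memset(buffer->raw, 0, buffer->size);  // silence, as a placeholder
    } else if (event == AudioTrack::EVENT_UNDERRUN) {
        // the server consumed all provided data; a real client would refill faster
    }
}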
In android_media_AudioTrack.cpp this pointer is assigned audioCallback:
static void audioCallback(int event, void* user, void *info) {

    audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
    {
        Mutex::Autolock l(sLock);
        if (sAudioTrackCallBackCookies.indexOf(callbackInfo) < 0) {
            return;
        }
        callbackInfo->busy = true;
    }

    switch (event) {
    case AudioTrack::EVENT_MARKER: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;

    case AudioTrack::EVENT_NEW_POS: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;
    }

    {
        Mutex::Autolock l(sLock);
        callbackInfo->busy = false;
        callbackInfo->cond.broadcast();
    }
}
The core of this function is env->CallStaticVoidMethod, the JNI mechanism for invoking a static Java method.
Its prototype is void CallStaticVoidMethod(jclass clazz, jmethodID methodID, ...) in the C++ form, and void (*CallStaticVoidMethod)(JNIEnv*, jclass, jmethodID, ...) in the C form. The C++ form is what this file uses, as the call above shows.
Here callbackInfo->audioTrack_class corresponds to the Java AudioTrack class, and the method ID was cached during registration:
javaAudioTrackFields.postNativeEventInJava = GetStaticMethodIDOrDie(env,
        audioTrackClass, JAVA_POSTEVENT_CALLBACK_NAME,
        "(Ljava/lang/Object;IIILjava/lang/Object;)V");

#define JAVA_POSTEVENT_CALLBACK_NAME "postEventFromNative"
From the code above, javaAudioTrackFields.postNativeEventInJava holds the method ID of AudioTrack.postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj). So env->CallStaticVoidMethod hands the event parameters to AudioTrack.java's postEventFromNative method for processing.
So how is this callback actually triggered inside AudioTrack.cpp?
if (cbf != NULL) {
    mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
    mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    // thread begins in paused state, and will not reference us until start()
}
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false),
      mPausedNs(0LL), mIgnoreNextPausedInt(false)
{
}

AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}

bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}
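The special negative values handled by the switch above are sentinel returns of processAudioBuffer(). A reference sketch of their declarations (values as in AOSP's AudioTrack.h at the time of writing; check your own tree):

// Sentinel return values of processAudioBuffer() (from AudioTrack.h):
static const nsecs_t NS_WHENEVER = -1;  // event driven: sleep until wake() is called
static const nsecs_t NS_INACTIVE = -2;  // track inactive: pause the thread indefinitely
static const nsecs_t NS_NEVER    = -3;  // no further callbacks: exit threadLoop()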
The callback fires when this thread calls mReceiver.processAudioBuffer(). So what does processAudioBuffer() do? It is the workhorse of the callback thread: it compares the current playback position against the marker and update-period settings, fires EVENT_MARKER and EVENT_NEW_POS (and, in TRANSFER_CALLBACK mode, requests more data with EVENT_MORE_DATA) through the stored callback_t, and returns how long the thread may sleep before it needs to run again.
Part 3: play
AudioTrack.play() corresponds to native_start() in JNI, which calls the native AudioTrack::start():
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    // update the playback state
    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    // update the frames played by the server (Modulo<uint32_t> mServer),
    // obtained through mProxy->getPosition()
    (void) updateAndGetPosition_l();
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        // read last server side position change via timestamp.
        ExtendedTimestamp ets;
        if (mProxy->getTimestamp(&ets) == OK &&
                ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)ets.mFlushed,
                    (long long)mFramesWritten);
            mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        // calls PlaybackThread::Track::start() on the AudioFlinger side
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}
Part 4: write
In android_media_AudioTrack.cpp, android_media_AudioTrack_write_native_bytes calls the writeToTrack function:
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
                         jint offsetInSamples, jint sizeInSamples, bool blocking) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
    size_t sizeInBytes = sizeInSamples * sizeof(T);
    // MODE_STREAM: the AudioTrack has no shared buffer
    if (track->sharedBuffer() == 0) {
        written = track->write(data + offsetInSamples, sizeInBytes, blocking);
        // for compatibility with earlier behavior of write(), return 0 in this case
        if (written == (ssize_t) WOULD_BLOCK) {
            written = 0;
        }
    } else {
        // writing to shared memory, check for capacity
        // MODE_STATIC: the AudioTrack owns a shared buffer, so a plain memcpy suffices
        if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
            sizeInBytes = track->sharedBuffer()->size();
        }
        memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
        written = sizeInBytes;
    }
    if (written >= 0) {
        return written / sizeof(T);
    }
    return interpretWriteSizeError(written);
}
Below, only the MODE_STREAM write path is discussed. (Note the unit bookkeeping in writeToTrack: the Java boundary counts samples of type T, AudioTrack::write() counts bytes, and the final division by sizeof(T) converts the byte count back to samples for the Java caller.)
AudioTrack and AudioFlinger share a single block of memory, and writing playback data is precisely the process of AudioTrack.cpp writing into that block. Reads and writes of the block are coordinated by AudioTrackShared.cpp, where AudioTrack's AudioTrackClientProxy and StaticAudioTrackClientProxy classes are implemented.
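Conceptually, the control block at the head of that memory behaves like a lock-free ring buffer shared across the Binder boundary. The following is only a mental model of the idea (the real audio_track_cblk_t in AudioTrackShared.h is considerably more involved):

// Simplified mental model of the client/server ring buffer; NOT the real audio_track_cblk_t.
#include <atomic>
#include <cstdint>

struct RingControlBlock {
    std::atomic<uint32_t> mFront;   // frames consumed so far by the server (AudioFlinger)
    std::atomic<uint32_t> mRear;    // frames produced so far by the client (AudioTrack)
    uint32_t mFrameCount;           // capacity of the data area that follows this block
};

// Frames the client may still write without clobbering unread data.
// The indices only ever grow; wrap-around relies on unsigned modular arithmetic.
uint32_t writableFrames(const RingControlBlock& cblk) {
    uint32_t filled = cblk.mRear.load(std::memory_order_acquire) -
                      cblk.mFront.load(std::memory_order_acquire);
    return cblk.mFrameCount - filled;
}

With that picture in mind, here is the MODE_STREAM write path: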
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
                            &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        /*
         * Claim a region of the shared memory through sp<AudioTrackClientProxy> mProxy;
         * implemented in AudioTrackShared.cpp.
         */
        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite = audioBuffer.size;
        // copy the data into the shared memory
        memcpy(audioBuffer.i8, buffer, toWrite);
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        // advance the read/write positions kept in AudioTrackShared.cpp
        releaseBuffer(&audioBuffer);
    }

    if (written > 0) {
        mFramesWritten += written / mFrameSize;
    }
    return written;
}
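Putting the native pieces together, a minimal streaming client written directly against AudioTrack.cpp could look like this. This is a sketch assuming an AOSP platform build with libmedia available; error handling is trimmed, and the parameter order follows the set() declaration quoted earlier.

// Minimal MODE_STREAM-style native playback sketch (assumes an AOSP platform build).
#include <media/AudioTrack.h>
#include <vector>

using namespace android;

int playOneSecondOfSilence() {
    sp<AudioTrack> track = new AudioTrack();   // the no-argument constructor from Part 2
    status_t err = track->set(
            AUDIO_STREAM_MUSIC,
            44100,                             // sample rate
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO,
            0,                                 // frameCount: 0 lets set() pick the minimum
            AUDIO_OUTPUT_FLAG_NONE,
            NULL, NULL,                        // no callback, no user data: write() path
            0,                                 // notificationFrames
            0,                                 // no shared buffer (streaming)
            false,                             // threadCanCallJava
            AUDIO_SESSION_ALLOCATE,
            AudioTrack::TRANSFER_SYNC);        // blocking write(), as in MODE_STREAM
    if (err != NO_ERROR) return -1;

    track->start();
    std::vector<int16_t> pcm(44100 * 2, 0);    // one second of stereo 16-bit silence
    track->write(pcm.data(), pcm.size() * sizeof(int16_t), true /*blocking*/);
    track->stop();
    return 0;
}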
Part 5: stop
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    // notify the shared-memory client side
    mProxy->interrupt();
    // stop the server-side Track
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    // pause the callback thread
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}
Part 6: release
public void release() {
    // even though native_release() stops the native AudioTrack, we need to stop
    // AudioTrack subclasses too.
    try {
        stop();
    } catch(IllegalStateException ise) {
        // don't raise an exception, we're releasing the resources.
    }
    baseRelease();
    native_release();
    mState = STATE_UNINITIALIZED;
}
#define CALLBACK_COND_WAIT_TIMEOUT_MS 1000

static void android_media_AudioTrack_release(JNIEnv *env,  jobject thiz) {
    sp<AudioTrack> lpTrack = setAudioTrack(env, thiz, 0);
    if (lpTrack == NULL) {
        return;
    }
    //ALOGV("deleting lpTrack: %x\n", (int)lpTrack);

    // delete the JNI data
    AudioTrackJniStorage* pJniStorage = (AudioTrackJniStorage *)env->GetLongField(
        thiz, javaAudioTrackFields.jniData);
    // reset the native resources in the Java object so any attempt to access
    // them after a call to release fails.
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    if (pJniStorage) {
        Mutex::Autolock l(sLock);
        audiotrack_callback_cookie *lpCookie = &pJniStorage->mCallbackData;
        //ALOGV("deleting pJniStorage: %x\n", (int)pJniStorage);
        while (lpCookie->busy) {
            if (lpCookie->cond.waitRelative(sLock,
                                            milliseconds(CALLBACK_COND_WAIT_TIMEOUT_MS)) !=
                                                    NO_ERROR) {
                break;
            }
        }
        sAudioTrackCallBackCookies.remove(lpCookie);
        // delete global refs created in native_setup
        env->DeleteGlobalRef(lpCookie->audioTrack_class);
        env->DeleteGlobalRef(lpCookie->audioTrack_ref);
        delete pJniStorage;
    }
}