MT6737 Android N Platform Audio System Study -- AudioTrack


I. Framework

[Figure: Android audio playback framework, showing the MediaPlayer and AudioTrack paths down through AudioFlinger to the hardware]
As the figure above shows, Android offers two ways to play sound, MediaPlayer and AudioTrack, and they differ considerably.
MediaPlayer: can play sound files in many formats (mp3, wav, ogg, aac, and so on); it creates the corresponding audio decoder in the framework layer.
AudioTrack: can only play PCM streams that have already been decoded, and the only file format it supports is wav, because wav files mostly carry raw PCM; it does not create a decoder.
How the two relate: as the figure shows, MediaPlayer still creates an AudioTrack in the framework layer and hands it the decoded PCM stream; the AudioTrack passes the data to AudioFlinger for mixing, and only then does it reach the hardware. In other words, MediaPlayer contains an AudioTrack.
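
To make the contrast concrete, here is a minimal, hedged Java sketch (the resource id and the raw PCM buffer are illustrative assumptions, not part of the original article): MediaPlayer is handed a compressed file and decodes it internally, while AudioTrack must be fed already-decoded PCM by the caller.

    import android.content.Context;
    import android.media.AudioFormat;
    import android.media.AudioManager;
    import android.media.AudioTrack;
    import android.media.MediaPlayer;

    public class PlaybackSketch {
        // MediaPlayer: give it a compressed resource (mp3/ogg/...); decoding happens in the framework.
        static MediaPlayer playWithMediaPlayer(Context ctx, int rawResId) {
            MediaPlayer mp = MediaPlayer.create(ctx, rawResId); // e.g. R.raw.song (hypothetical resource)
            mp.start();
            return mp;
        }

        // AudioTrack: the caller supplies decoded 16-bit PCM itself.
        static AudioTrack playWithAudioTrack(byte[] pcm, int sampleRateInHz) {
            int minBuf = AudioTrack.getMinBufferSize(sampleRateInHz,
                    AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
            AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz,
                    AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                    minBuf, AudioTrack.MODE_STREAM);
            track.play();
            track.write(pcm, 0, pcm.length); // blocking write of raw PCM in MODE_STREAM
            return track;
        }
    }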

II. Walkthrough of the code (using the MT6737 Android N platform as the example)

1. Relevant source paths and diagram

frameworks/base/media/java/android/media/AudioTrack.java
frameworks/base/core/jni/android_media_AudioTrack.cpp
frameworks/av/media/libmedia/AudioTrack.cpp
[Figure: AudioTrack call path from AudioTrack.java through android_media_AudioTrack.cpp (JNI) to the libmedia AudioTrack.cpp]

2. Example: playing audio with the AudioTrack API (the sample also captures the PCM it plays back with AudioRecord)

package com.audiotrack;import java.io.File;import java.io.FileInputStream;import java.io.FileNotFoundException;import java.io.FileOutputStream;import java.io.IOException;import android.app.Activity;import android.media.AudioFormat;import android.media.AudioManager;import android.media.AudioRecord;import android.media.AudioTrack;import android.media.MediaRecorder;import android.os.Bundle;import android.os.Environment;import android.os.Handler;import android.os.Message;import android.view.View;import android.view.View.OnClickListener;import android.widget.Button;import android.widget.RadioButton;import android.widget.RadioGroup;import android.widget.Toast;import android.widget.RadioGroup.OnCheckedChangeListener;public class ActivityMain extends Activity{    /**     * btn 录音     */    private Button btnRecord;    /**     * btn 停止录音     */    private Button btnStopRecord;    /**     * btn 播放     */    private Button btnPlay;    /**     * btn 停止播放     */    private Button btnStopPlay;    /**     * btn 即时播放     */    private Button btnInstantPlay;    /**     * btn 停止即时播放     */    private Button btnStopInstantPlay;    /**     * rGrp 声道     */    private RadioGroup rGrpTrack;    /**     * rGrp 采样率     */    private RadioGroup rGrpFP;    /**     * rGrp 采样精度,编码率     */    private RadioGroup rGrpPCM;    /**     * rbtn 单声道     */    private RadioButton rbtnSingle;    /**     * rbtn 双声道     */    private RadioButton rbtnDouble;    /**     * rbtn 采样率 44100hz     */    private RadioButton rbtn44100;    /**     * rbtn 采样率 11025hz     */    private RadioButton rbtn11025;    /**     * rbtn 采样率 22050hz     */    private RadioButton rbtn22050;    /**     * rbtn 采样精度 pcm8     */    private RadioButton rbtnPcm8;    /**     * rbtn 采样精度 pcm16     */    private RadioButton rbtnPcm16;    private int recBufSize = 0;    private int playBufSize = 0;    /**     * 采样率(默认44100,每秒44100个点)     */    private int sampleRateInHz = 44100;    /**     * 声道(默认单声道)     */    private int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;    /**     * 编码率(默认ENCODING_PCM_16BIT)     */    private int encodingBitrate = AudioFormat.ENCODING_PCM_16BIT;    private AudioRecord audioRecord;    private AudioTrack audioTrack;    /**     * 是否录音     */    private boolean blnRecord = false;    /**     * 是否播放     */    private boolean blnPlay = false;    /**     * 即时播放     */    private boolean blnInstantPlay = false;    /**     * 录音线程     */    private Thread threadRecord;    /**     * 播放录音线程     */    private ThreadAudioTrack threadAudioTrack;    /**     * 文件夹     */    private static final String AUDIO_RECORDER_FOLDER = "audioRecorder";    /**     * 临时文件名称     */    private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";    /**     * 文件名称     */    private static final String AUDIO_RECORDER_FILE = "session.wav";    private static final int RECORDER_BPP = 16;    /** Called when the activity is first created. 
*/    @Override    public void onCreate(Bundle savedInstanceState)    {        super.onCreate(savedInstanceState);        setContentView(R.layout.main);        btnRecord = (Button)findViewById(R.id.btn_record);        btnStopRecord = (Button)findViewById(R.id.btn_stop_record);        btnPlay = (Button)findViewById(R.id.btn_play);        btnStopPlay = (Button)findViewById(R.id.btn_stop_play);        btnInstantPlay = (Button)findViewById(R.id.btn_instantplay);        btnStopInstantPlay = (Button)findViewById(R.id.btn_stop_instantplay);        rGrpTrack = (RadioGroup)findViewById(R.id.rgrp_track);        rGrpFP = (RadioGroup)findViewById(R.id.rgrp_fp);        rGrpPCM = (RadioGroup)findViewById(R.id.rgrp_pcm);        rbtnSingle = (RadioButton)findViewById(R.id.rbtn_single);        rbtnDouble = (RadioButton)findViewById(R.id.rbtn_double);        rbtn44100 = (RadioButton)findViewById(R.id.rbtn_fp_44100);        rbtn11025 = (RadioButton)findViewById(R.id.rbtn_fp_11025);        rbtn22050 = (RadioButton)findViewById(R.id.rbtn_fp_22050);        rbtnPcm8 = (RadioButton)findViewById(R.id.rbtn_pcm_8bit);        rbtnPcm16 = (RadioButton)findViewById(R.id.rbtn_pcm_16bit);        btnStopRecord.setEnabled(false);        btnPlay.setEnabled(false);        btnStopPlay.setEnabled(false);        btnStopInstantPlay.setEnabled(false);        //声道        rGrpTrack.setOnCheckedChangeListener(new OnCheckedChangeListener()         {            @Override            public void onCheckedChanged(RadioGroup group, int checkedId)             {                if(checkedId==rbtnSingle.getId())   //单声道                {                    channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;                }                else if(checkedId==rbtnDouble.getId())  //双声道                {                    channelConfig = AudioFormat.CHANNEL_CONFIGURATION_STEREO;                }            }        });        rGrpFP.setOnCheckedChangeListener(new OnCheckedChangeListener()         {            @Override            public void onCheckedChanged(RadioGroup group, int checkedId)             {                if(checkedId==rbtn44100.getId())                {                    sampleRateInHz = 44100;                }                else if(checkedId==rbtn11025.getId())                {                    sampleRateInHz = 11025;                }                else if(checkedId==rbtn22050.getId())                {                    sampleRateInHz = 22050;                }                if(blnInstantPlay||blnPlay)                {                    audioTrack.setPlaybackRate(sampleRateInHz);                }            }        });        rGrpPCM.setOnCheckedChangeListener(new OnCheckedChangeListener()        {            @Override            public void onCheckedChanged(RadioGroup group, int checkedId)             {                if(checkedId==rbtnPcm8.getId())                {                    encodingBitrate = AudioFormat.ENCODING_PCM_8BIT;                }                else if(checkedId==rbtnPcm16.getId())                {                    encodingBitrate = AudioFormat.ENCODING_PCM_16BIT;                }            }        });        //录音        btnRecord.setOnClickListener(new OnClickListener()        {            @Override            public void onClick(View v)            {                btnRecord.setEnabled(false);                btnInstantPlay.setEnabled(false);                btnStopRecord.setEnabled(true);                startRecord();            }        });        //停止录音        
btnStopRecord.setOnClickListener(new OnClickListener()        {            @Override            public void onClick(View v)            {                btnStopRecord.setEnabled(false);                btnPlay.setEnabled(true);                stopRecord();            }        });        //播放        btnPlay.setOnClickListener(new OnClickListener()         {            @Override            public void onClick(View v)             {                btnStopPlay.setEnabled(true);                btnPlay.setEnabled(false);                btnInstantPlay.setEnabled(false);                threadAudioTrack = new ThreadAudioTrack();                threadAudioTrack.init();                threadAudioTrack.start();            }        });        //停止播放        btnStopPlay.setOnClickListener(new OnClickListener()        {            @Override            public void onClick(View v)             {                threadAudioTrack.free();                threadAudioTrack = null;                btnRecord.setEnabled(true);                btnInstantPlay.setEnabled(true);                btnStopRecord.setEnabled(false);                btnPlay.setEnabled(false);                btnStopPlay.setEnabled(false);            }        });        //开始即时播放        btnInstantPlay.setOnClickListener(new OnClickListener()        {            @Override            public void onClick(View v) {                btnRecord.setEnabled(false);                btnInstantPlay.setEnabled(false);                btnStopInstantPlay.setEnabled(true);                btnPlay.setEnabled(false);                recBufSize = AudioRecord.getMinBufferSize(sampleRateInHz,                         channelConfig, encodingBitrate);                playBufSize = AudioTrack.getMinBufferSize(sampleRateInHz,                         channelConfig, encodingBitrate);                audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,                         sampleRateInHz, channelConfig, encodingBitrate, recBufSize);                audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz,                         channelConfig, encodingBitrate, playBufSize, AudioTrack.MODE_STREAM);                blnInstantPlay = true;                new ThreadInstantPlay().start();            }        });        //停止即时播放        btnStopInstantPlay.setOnClickListener(new OnClickListener()         {            @Override            public void onClick(View v)             {                blnInstantPlay = false;                btnStopInstantPlay.setEnabled(false);                btnRecord.setEnabled(true);                btnInstantPlay.setEnabled(true);            }        });    }    /**     * 开始录音     */    private void startRecord()    {        //根据采样率、声道、采样精度 决定frame的大小        recBufSize = AudioRecord.getMinBufferSize(sampleRateInHz,                channelConfig, encodingBitrate);        audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,                sampleRateInHz, channelConfig, encodingBitrate, recBufSize);        audioRecord.startRecording();        blnRecord = true;        threadRecord = new Thread(new Runnable()        {            @Override            public void run()            {                writeAudioDataToFile();            }        });        threadRecord.start();    }    /**     * 停止录音     */    private void stopRecord()    {        if(audioRecord!=null)        {            blnRecord = false;            audioRecord.stop();            audioRecord.release();            audioRecord = null;            threadRecord = null;        }        
copyWaveFile(getTempFilename(),getFileName());        deleteTempFile();    }    /**     * 删除临时文件     */    private void deleteTempFile()    {        File file = new File(getTempFilename());        file.delete();    }    /**     * 将语音信息     */    private void writeAudioDataToFile()    {        byte[] bs = new byte[recBufSize];        String fileName = getTempFilename();        FileOutputStream fos = null;        try        {            fos = new FileOutputStream(fileName);        }        catch (FileNotFoundException e)        {            e.printStackTrace();        }        int line = 0;        if(fos!=null)        {            while(blnRecord)            {                line = audioRecord.read(bs, 0, recBufSize);                if(line!=AudioRecord.ERROR_INVALID_OPERATION)                {                    try                     {                        fos.write(bs);                    }                     catch (IOException e)                     {                        e.printStackTrace();                    }                }            }        }    }    /**     * 获取临时文件名称     *     * @return String     */    private String getTempFilename()    {        String filePath = Environment.getExternalStorageDirectory().getPath();        File file = new File(filePath, AUDIO_RECORDER_FOLDER);        if(!file.exists())        {            file.mkdirs();        }        File tmpFile = new File(filePath, AUDIO_RECORDER_TEMP_FILE);        if(tmpFile.exists())        {            tmpFile.delete();        }        return (file.getPath() + "/" + AUDIO_RECORDER_TEMP_FILE);    }    /**     * 获取文件名称     *      * @return String     */    private String getFileName()    {        String filePath = Environment.getExternalStorageDirectory().getAbsolutePath();        File file = new File(filePath, AUDIO_RECORDER_FOLDER);        if(file.exists())        {            file.delete();        }        return (file.getAbsolutePath() + "/" + AUDIO_RECORDER_FILE);    }    /**     * 复制文件     *      * @param inFilename     * @param outFilename     */    private void copyWaveFile(String inFilename,String outFilename)    {        FileInputStream fis = null;        FileOutputStream fos = null;        long totalAudioLen = 0;        long totalDataLen = totalAudioLen + 36;        long sampleRate = sampleRateInHz;        int channels = 2;        long byteRate = RECORDER_BPP * sampleRateInHz * channels/8;        byte[] data = new byte[recBufSize];        try         {            fis = new FileInputStream(inFilename);            fos = new FileOutputStream(outFilename);            totalAudioLen = fis.getChannel().size();            totalDataLen = totalAudioLen + 36;            writeWaveFileHeader(fos, totalAudioLen, totalDataLen, sampleRate,                     channels, byteRate);            while(fis.read(data)!=-1)            {                fos.write(data);            }            fis.close();            fos.close();        }        catch(FileNotFoundException e)        {            e.printStackTrace();        }         catch (IOException e)         {            e.printStackTrace();        }    }    /**     * @param fos     * @param totalAudioLen     * @param totalDataLen     * @param longSampleRate     * @param channels     * @param byteRate     * @throws IOException     */    private void writeWaveFileHeader(FileOutputStream fos, long totalAudioLen,             long totalDataLen, long sampleRate, int channels,             long byteRate) throws IOException    {        byte[] header = new byte[44];        header[0] = 
'R';  // RIFF/WAVE header        header[1] = 'I';        header[2] = 'F';        header[3] = 'F';        header[4] = (byte) (totalDataLen & 0xff);        header[5] = (byte) ((totalDataLen >> 8) & 0xff);        header[6] = (byte) ((totalDataLen >> 16) & 0xff);        header[7] = (byte) ((totalDataLen >> 24) & 0xff);        header[8] = 'W';        header[9] = 'A';        header[10] = 'V';        header[11] = 'E';        header[12] = 'f';  // 'fmt ' chunk        header[13] = 'm';        header[14] = 't';        header[15] = ' ';        header[16] = 16;  // 4 bytes: size of 'fmt ' chunk        header[17] = 0;        header[18] = 0;        header[19] = 0;        header[20] = 1;  // format = 1        header[21] = 0;        header[22] = (byte) channels;        header[23] = 0;        header[24] = (byte) (sampleRate & 0xff);        header[25] = (byte) ((sampleRate >> 8) & 0xff);        header[26] = (byte) ((sampleRate >> 16) & 0xff);        header[27] = (byte) ((sampleRate >> 24) & 0xff);        header[28] = (byte) (byteRate & 0xff);        header[29] = (byte) ((byteRate >> 8) & 0xff);        header[30] = (byte) ((byteRate >> 16) & 0xff);        header[31] = (byte) ((byteRate >> 24) & 0xff);        header[32] = (byte) (2 * 16 / 8);  // block align        header[33] = 0;        header[34] = RECORDER_BPP;  // bits per sample        header[35] = 0;        header[36] = 'd';        header[37] = 'a';        header[38] = 't';        header[39] = 'a';        header[40] = (byte) (totalAudioLen & 0xff);        header[41] = (byte) ((totalAudioLen >> 8) & 0xff);        header[42] = (byte) ((totalAudioLen >> 16) & 0xff);        header[43] = (byte) ((totalAudioLen >> 24) & 0xff);        fos.write(header, 0, 44);    }    /**     * 播放录音线程     */    class ThreadAudioTrack extends Thread    {        byte[] bs;        File file;        FileInputStream fis;        /**         * 初始化AudioTrack         */        public void init()        {               file = new File("/sdcard/" + AUDIO_RECORDER_FOLDER +"/",                     AUDIO_RECORDER_FILE);            try             {                file.createNewFile();                fis = new FileInputStream(file);                blnPlay = true;                playBufSize = AudioTrack.getMinBufferSize(sampleRateInHz,                         channelConfig, encodingBitrate);                audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz,                         channelConfig, encodingBitrate, playBufSize, AudioTrack.MODE_STREAM);                bs = new byte[playBufSize];            }             catch (IOException e)             {                e.printStackTrace();            }        }        /**         * 释放AudioTrack         */        public void free()        {            blnPlay = false;        }        @Override        public void run() {            audioTrack.play();            while(blnPlay)            {                try                 {                    int line = fis.read(bs, 0, recBufSize);                    if(line==-1)                    {                        blnPlay = false;                        handler.sendMessage(new Message());//发送空消息体                        return;                    }                    byte[] tmpBuf = new byte[line];                    System.arraycopy(bs, 0, tmpBuf, 0, line);//                  fis.read(bs);//                  tmpBuf = bs.clone();                    audioTrack.write(tmpBuf, 0, tmpBuf.length);                }                 catch (IOException e)                 {                    
e.printStackTrace();                }            }            audioTrack.stop();            audioTrack = null;            try             {                fis.close();            }             catch (IOException e)             {                e.printStackTrace();            }        }    }    /**     *      * 即时播放线程     *     */    class ThreadInstantPlay extends Thread    {        @Override        public void run()        {            byte[] bsBuffer = new byte[recBufSize];            audioRecord.startRecording();            audioTrack.play();            while(blnInstantPlay)             {                int line = audioRecord.read(bsBuffer, 0, recBufSize);                byte[] tmpBuf = new byte[line];                System.arraycopy(bsBuffer, 0, tmpBuf, 0, line);                audioTrack.write(tmpBuf, 0, tmpBuf.length);            }            audioTrack.stop();            audioRecord.stop();        }    }    @Override    protected void onDestroy()     {        super.onDestroy();        android.os.Process.killProcess(android.os.Process.myPid());    }    Handler handler = new Handler()    {        @Override        public void handleMessage(Message msg) {            super.handleMessage(msg);            Toast.makeText(ActivityMain.this, "播放结束!", 2000).show();            btnPlay.setEnabled(true);            btnInstantPlay.setEnabled(true);            btnStopPlay.setEnabled(false);        }    };}

3. AudioTrack analysis (Java side)

AudioTrack.java mainly provides the API for the application layer, wrapping the lower-level calls. Some of its functions are analyzed below.

3.1. The AudioTrack constructor call used in the example

    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, // audio stream type
            sampleRateInHz,          // sample rate
            channelConfig,           // channel configuration
            encodingBitrate,         // sample format (encoding)
            playBufSize,             // minimum size of the audio data buffer
            AudioTrack.MODE_STREAM); // data loading mode

3.1.1. The constructor parameters

3.1.1.1. Audio stream type

Tracing the code, this parameter is defined along the chain AudioTrack.java -> AudioManager.java -> AudioSystem.java:

    /* The default audio stream */
    public static final int STREAM_DEFAULT = -1;
    /* The audio stream for phone calls */
    public static final int STREAM_VOICE_CALL = 0;       // in-call voice
    /* The audio stream for system sounds */
    public static final int STREAM_SYSTEM = 1;            // system sounds
    /* The audio stream for the phone ring and message alerts */
    public static final int STREAM_RING = 2;              // ringtone
    /* The audio stream for music playback */
    public static final int STREAM_MUSIC = 3;             // music
    /* The audio stream for alarms */
    public static final int STREAM_ALARM = 4;             // alarms
    /* The audio stream for notifications */
    public static final int STREAM_NOTIFICATION = 5;      // notification stream
    /* @hide The audio stream for phone calls when connected on bluetooth */
    public static final int STREAM_BLUETOOTH_SCO = 6;     // per the comment, calls over a Bluetooth headset
    /* @hide The audio stream for enforced system sounds in certain countries (e.g camera in Japan) */
    public static final int STREAM_SYSTEM_ENFORCED = 7;   // sounds enforced in some countries, e.g. Japan requires a shutter sound in any camera mode to deter covert photos
    /* @hide The audio stream for DTMF tones */
    public static final int STREAM_DTMF = 8;               // DTMF tone stream
    /* @hide The audio stream for text to speech (TTS) */
    public static final int STREAM_TTS = 9;                // text-to-speech stream, i.e. synthesized speech

3.1.1.2. Sample rate

AudioFormat.java shows the accepted range: 4000 <= sampleRateInHz <= 192000 (or SAMPLE_RATE_UNSPECIFIED):

    public static final int SAMPLE_RATE_HZ_MIN = 4000;
    /* Maximum value for sample rate */
    public static final int SAMPLE_RATE_HZ_MAX = 192000;
    /* Sample rate will be a route-dependent value. */
    public static final int SAMPLE_RATE_UNSPECIFIED = 0;

    if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
            sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
            sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
        throw new IllegalArgumentException(sampleRateInHz
                + "Hz is not a supported sample rate.");
    }
    mSampleRate = sampleRateInHz;

3.1.1.3. Channel configuration

AudioFormat.java defines the channel configurations:

    @Deprecated
    public static final int CHANNEL_CONFIGURATION_INVALID   = 0;
    @Deprecated
    public static final int CHANNEL_CONFIGURATION_DEFAULT   = 1;
    @Deprecated
    public static final int CHANNEL_CONFIGURATION_MONO      = 2;  // mono
    @Deprecated
    public static final int CHANNEL_CONFIGURATION_STEREO    = 3;  // stereo

3.1.1.4. Sample format (encoding)

AudioFormat.java defines the sample formats:

    /** Invalid audio data format */
    public static final int ENCODING_INVALID = 0;
    /** Default audio data format */
    public static final int ENCODING_DEFAULT = 1;
    /** Audio data format: PCM 16 bit per sample. Guaranteed to be supported by devices. */
    public static final int ENCODING_PCM_16BIT = 2;  // 16 bits, i.e. two bytes, per sample
    /** Audio data format: PCM 8 bit per sample. Not guaranteed to be supported by devices. */
    public static final int ENCODING_PCM_8BIT = 3;   // 8 bits, i.e. one byte, per sample
    /** Audio data format: single-precision floating-point per sample */
    public static final int ENCODING_PCM_FLOAT = 4;
3.1.1.5. The playBufSize buffer

playBufSize is the minimum buffer size to allocate, determined from the characteristics of the audio data (sample rate, channel configuration and sample format).

    playBufSize = AudioTrack.getMinBufferSize(sampleRateInHz,
            channelConfig, encodingBitrate);
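
For intuition, here is a rough sketch of how that buffer relates to the stream parameters (the true minimum also depends on the output path and AudioFlinger, so always use getMinBufferSize() rather than this arithmetic alone; the values below are illustrative):

    int sampleRateInHz = 44100;
    int channelCount   = 2;   // CHANNEL_OUT_STEREO
    int bytesPerSample = 2;   // ENCODING_PCM_16BIT
    int bytesPerFrame  = channelCount * bytesPerSample;

    // What the platform actually requires for a working track.
    int playBufSize = AudioTrack.getMinBufferSize(sampleRateInHz,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);

    // How many frames that buffer holds, and how much playback time it covers.
    // This is the same formula the JNI layer uses later to compute frameCount.
    int frames = playBufSize / bytesPerFrame;
    double bufferMillis = 1000.0 * frames / sampleRateInHz;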
3.1.1.6. Data loading mode

AudioTrack has two data loading modes: MODE_STATIC and MODE_STREAM.
MODE_STREAM means the application keeps writing data into the AudioTrack through write(), much like sending data over a socket: the app obtains PCM from somewhere, for example from a decoder, and writes it to the AudioTrack. The drawback is the constant crossing between the Java layer and the native layer, which costs efficiency.
MODE_STATIC means the audio data is placed into a fixed buffer once, at creation time, and handed to the AudioTrack; no further write() calls are needed and the AudioTrack plays that buffer by itself. This suits sounds with a small memory footprint and tight latency requirements, such as ringtones.

    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;
    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
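
A minimal, hedged MODE_STATIC sketch (the clip and its loader are illustrative, not from the article). Note that in this mode the single write() happens before play(), which matches the writeToTrack() analysis later, where STATIC data is copied straight into shared memory:

    // MODE_STATIC sketch for a short, fully decoded 16-bit PCM clip.
    byte[] clip = loadShortPcmClip();                 // hypothetical helper returning raw PCM
    AudioTrack staticTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            clip.length,                              // the buffer must hold the whole clip
            AudioTrack.MODE_STATIC);
    staticTrack.write(clip, 0, clip.length);          // one up-front copy into the native buffer
    staticTrack.play();                               // replay later via reloadStaticData() + play()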

3.1.2. Implementation of the constructor

3.1.2.1. The constructor in AudioTrack.java

The constructor is implemented in AudioTrack.java; the implementation is analyzed below.

 public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,int mode, int sessionId)        throws IllegalArgumentException {        super(attributes);        // mState already == STATE_UNINITIALIZED        if (format == null) {            throw new IllegalArgumentException("Illegal null AudioFormat");        }        // remember which looper is associated with the AudioTrack instantiation        Looper looper;        if ((looper = Looper.myLooper()) == null) {            looper = Looper.getMainLooper();        }        int rate = format.getSampleRate();        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {            rate = 0;        }        int channelIndexMask = 0;        if ((format.getPropertySetMask()                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {            channelIndexMask = format.getChannelIndexMask();        }        int channelMask = 0;        if ((format.getPropertySetMask()                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {            channelMask = format.getChannelMask();        } else if (channelIndexMask == 0) { // if no masks at all, use stereo            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;        }        int encoding = AudioFormat.ENCODING_DEFAULT;        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {            encoding = format.getEncoding();        }        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);//检查参数是否合法        mStreamType = AudioSystem.STREAM_DEFAULT;        audioBuffSizeCheck(bufferSizeInBytes);//检查缓冲区大小        mInitializationLooper = looper;//记录创建实例时所在的looper        /*    创建AudioTrack的时候需要指定一个audio session。如果这个session已经被其他的播放器或者audio track使用,新创建的AudioTrack就会和它们共享AudioEffect。如果是第一次使用该session,AudioEffect就会将session与新创建的AudioTrack对象关联起来,以后其他的播放器或audio track再使用该session时,就会与该AudioTrack共享AudioEffect。如果没有指定一个session,就会为该AudioTrack创建一个自己的session,该session可以被别人通过getAudioSessionId得到。别人得到了该session,并且使用该session创建播放器或者audio track,就会与你共享AudioEffect。    */        //检查sessionId的合法性        if (sessionId < 0) {            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);        }        int[] sampleRate = new int[] {mSampleRate};        int[] session = new int[1];//new一个变量,用来保存sessionId        session[0] = sessionId;        // native initialization        //调用native层的native_setup,将上层设置的采样率、声道、采样精度、数据加载模式、buffersize传给native层        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);        if (initResult != SUCCESS) {            loge("Error code "+initResult+" when initializing AudioTrack.");            return; // with mState == STATE_UNINITIALIZED        }        mSampleRate = sampleRate[0];        mSessionId = session[0];    //根据加载模式,设置状态        if (mDataLoadMode == MODE_STATIC) {            mState = STATE_NO_STATIC_DATA;        } else {            mState = STATE_INITIALIZED;        }    }
3.1.2.2. The native_setup call

native_setup crosses into the native layer through JNI. The JNI method table shown below indicates that it maps to android_media_AudioTrack_setup in android_media_AudioTrack.cpp.
[Figure: JNI method table in android_media_AudioTrack.cpp, mapping native_setup to android_media_AudioTrack_setup]
The android_media_AudioTrack_setup function is analyzed below.

static jint    android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,        jlong nativeAudioTrack) {    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"        "nativeAudioTrack=0x%llX",        jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,        nativeAudioTrack);    sp<AudioTrack> lpTrack = 0;    //检查参数合法    if (jSession == NULL) {        ALOGE("Error creating AudioTrack: invalid session ID pointer");        return (jint) AUDIO_JAVA_ERROR;    }    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);    if (nSession == NULL) {        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");        return (jint) AUDIO_JAVA_ERROR;    }    audio_session_t sessionId = (audio_session_t) nSession[0];    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);    nSession = NULL;    AudioTrackJniStorage* lpJniStorage = NULL;    audio_attributes_t *paa = NULL;    jclass clazz = env->GetObjectClass(thiz);    if (clazz == NULL) {        ALOGE("Can't find %s when setting up callback.", kClassPathName);        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;    }    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.    if (nativeAudioTrack == 0) {        if (jaa == 0) {            ALOGE("Error creating AudioTrack: invalid audio attributes");            return (jint) AUDIO_JAVA_ERROR;        }        if (jSampleRate == 0) {            ALOGE("Error creating AudioTrack: invalid sample rates");            return (jint) AUDIO_JAVA_ERROR;        }        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);        int sampleRateInHertz = sampleRates[0];        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);        // Invalid channel representations are caught by !audio_is_output_channel() below.        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(                channelPositionMask, channelIndexMask);        if (!audio_is_output_channel(nativeChannelMask)) {            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;        }        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);        // check the format.        
// This function was called from Java, so we compare the format against the Java constants        audio_format_t format = audioFormatToNative(audioFormat);//check采样精度        if (format == AUDIO_FORMAT_INVALID) {            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;        }        // compute the frame count计算帧数        //帧数 = buffSizeInBytes / (channel数 * 每个channel数据量)        size_t frameCount;        if (audio_is_linear_pcm(format)) {            const size_t bytesPerSample = audio_bytes_per_sample(format);            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);        } else {            frameCount = buffSizeInBytes;        }        //创建一个native AudioTrack对象        // create the native AudioTrack object        lpTrack = new AudioTrack();        // read the AudioAttributes values        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));        const jstring jtags =                (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);        const char* tags = env->GetStringUTFChars(jtags, NULL);        // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it        strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);        env->ReleaseStringUTFChars(jtags, tags);        paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);        paa->content_type =                (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);        paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",                paa->usage, paa->content_type, paa->flags, paa->tags);        // initialize the callback information:        // this data will be passed with every AudioTrack callback这些数据将通过每个AudioTrack回调        lpJniStorage = new AudioTrackJniStorage();        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);        // we use a weak reference so the AudioTrack object can be garbage collected.        
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);        lpJniStorage->mCallbackData.busy = false;        //初始化不同加载模式下的native AudioTrack对象        // initialize the native AudioTrack object        status_t status = NO_ERROR;        switch (memoryMode) {        case MODE_STREAM:            status = lpTrack->set(                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)                    sampleRateInHertz,                    format,// word length, PCM                    nativeChannelMask,                    frameCount,                    AUDIO_OUTPUT_FLAG_NONE,                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack                    0,// shared mem 共享内存,STREAM模式下为空,实际使用的共享内存在AudioFlinger中创建                    true,// thread can call Java内部线程可以调用JNI函数                    sessionId,// audio session ID                    AudioTrack::TRANSFER_SYNC,                    NULL,                         // default offloadInfo                    -1, -1,                       // default uid, pid values                    paa);            break;        case MODE_STATIC:            //如果是static模式,需要先创建共享内存            // AudioTrack is using shared memory            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");                goto native_init_failure;            }            status = lpTrack->set(                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)                    sampleRateInHertz,                    format,// word length, PCM                    nativeChannelMask,                    frameCount,                    AUDIO_OUTPUT_FLAG_NONE,                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack                    lpJniStorage->mMemBase,// shared mem                    true,// thread can call Java                    sessionId,// audio session ID                    AudioTrack::TRANSFER_SHARED,                    NULL,                         // default offloadInfo                    -1, -1,                       // default uid, pid values                    paa);            break;        default:            ALOGE("Unknown mode %d", memoryMode);            goto native_init_failure;        }        if (status != NO_ERROR) {            ALOGE("Error %d initializing AudioTrack", status);            goto native_init_failure;        }    } else {  // end if (nativeAudioTrack == 0)        lpTrack = (AudioTrack*)nativeAudioTrack;        // TODO: We need to find out which members of the Java AudioTrack might        // need to be initialized from the Native AudioTrack        // these are directly returned from getters:        //  mSampleRate        //  mAudioFormat        //  mStreamType        //  mChannelConfiguration        //  mChannelCount        //  mState (?)        //  mPlayState (?)        
// these may be used internally (Java AudioTrack.audioParamCheck():        //  mChannelMask        //  mChannelIndexMask        //  mDataLoadMode        // initialize the callback information:        // this data will be passed with every AudioTrack callback        lpJniStorage = new AudioTrackJniStorage();        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);        // we use a weak reference so the AudioTrack object can be garbage collected.        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);        lpJniStorage->mCallbackData.busy = false;    }    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);    if (nSession == NULL) {        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");        goto native_init_failure;    }    // read the audio session ID back from AudioTrack in case we create a new session    nSession[0] = lpTrack->getSessionId();    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);    nSession = NULL;    {        const jint elements[1] = { (jint) lpTrack->getSampleRate() };        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);    }    {   // scope for the lock        Mutex::Autolock l(sLock);        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);    }    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field    // of the Java object (in mNativeTrackInJavaObj)    setAudioTrack(env, thiz, lpTrack);    // save the JNI resources so we can free them later    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);    // since we had audio attributes, the stream type was derived from them during the    // 把JNI层中new出来的AudioTrack对象指针保存到Java对象的一个变量中,这样就把JNI层的AudioTrack对象和Java层的AudioTrack对象关联起来了,这是Android的常用技法。      // creation of the native AudioTrack: push the same value to the Java object    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());    if (paa != NULL) {        // audio attributes were copied in AudioTrack creation        free(paa);        paa = NULL;    }    return (jint) AUDIO_JAVA_SUCCESS;    // failures:    native_init_failure:    if (paa != NULL) {        free(paa);    }    if (nSession != NULL) {        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);    }    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);    delete lpJniStorage;    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);    // lpTrack goes out of scope, so reference count drops to zero    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;}
3.1.2.3. AudioTrackJniStorage

The AudioTrackJniStorage helper used above is defined in android_media_AudioTrack.cpp; it is analyzed here.

    class AudioTrackJniStorage {
    public:
        sp<MemoryHeapBase>         mMemHeap;
        sp<MemoryBase>             mMemBase;
        audiotrack_callback_cookie mCallbackData;
        sp<JNIDeviceCallback>      mDeviceCallback;

        AudioTrackJniStorage() {
            mCallbackData.audioTrack_class = 0;
            mCallbackData.audioTrack_ref = 0;
        }

        ~AudioTrackJniStorage() {
            mMemBase.clear();
            mMemHeap.clear();
        }

        // Allocate an anonymous shared-memory region of the requested size.
        bool allocSharedMem(int sizeInBytes) {
            // Create the anonymous shared memory.
            mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
            if (mMemHeap->getHeapID() < 0) {
                return false;
            }
            mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
            // Note the pattern: first create a MemoryHeapBase, then wrap it in a MemoryBase.
            return true;
        }
    };
MemoryHeapBase and MemoryBase do two main things: (1) allocate a block of shared memory that two processes can share; (2) build on Binder, so the processes using these classes can talk to each other.

    MemoryHeapBase::MemoryHeapBase(size_t size, uint32_t flags, char const * name)
        : mFD(-1), mSize(0), mBase(MAP_FAILED), mFlags(flags),
          mDevice(0), mNeedUnmap(false), mOffset(0)
    {
        // Get the memory page size.
        const size_t pagesize = getpagesize();
        // Round the size up to a page boundary.
        size = ((size + pagesize-1) & ~(pagesize-1));
        /* Create the shared memory: open the /dev/ashmem device and obtain a file descriptor. */
        int fd = ashmem_create_region(name == NULL ? "MemoryHeapBase" : name, size);
        ALOGE_IF(fd<0, "error creating ashmem region: %s", strerror(errno));
        if (fd >= 0) {
            // mmap the anonymous shared memory into the current process's address space.
            if (mapfd(fd, size) == NO_ERROR) {
                if (flags & READ_ONLY) {
                    ashmem_set_prot_region(fd, PROT_READ);
                }
            }
        }
    }

3.2. Analysis of audioTrack.play()

3.2.1. Implementation in AudioTrack.java

    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        baseStart();
        synchronized(mPlayStateLock) {
            native_start(); // JNI call into android_media_AudioTrack.cpp: android_media_AudioTrack_start
            mPlayState = PLAYSTATE_PLAYING;
        }
    }

3.2.2. native_start in the native layer

`{"native_start",         "()V",      (void *)android_media_AudioTrack_start},`

The JNI method table shows that native_start maps to the android_media_AudioTrack_start function in android_media_AudioTrack.cpp.

    static void
    android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
    {
        sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
        if (lpTrack == NULL) {
            jniThrowException(env, "java/lang/IllegalStateException",
                "Unable to retrieve AudioTrack pointer for start()");
            return;
        }
        lpTrack->start(); // call the native AudioTrack's start()
    }

3.3. Analysis of audioTrack.write()

3.3.1. write() in AudioTrack.java

The Java layer offers several write() overloads. The two shown below are the short[] variant, where each sample occupies 16 bits (two bytes), and the float[] variant used with ENCODING_PCM_FLOAT; byte[] overloads also exist for data expressed one byte at a time.

public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,            @WriteMode int writeMode) {        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {            return ERROR_INVALID_OPERATION;        }        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");            return ERROR_BAD_VALUE;        }        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow                || (offsetInShorts + sizeInShorts > audioData.length)) {            return ERROR_BAD_VALUE;        }        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,                writeMode == WRITE_BLOCKING);//通过JNI调用android_media_AudioTrack.cpp中函数(void *)android_media_AudioTrack_writeArray<jshortArray>        if ((mDataLoadMode == MODE_STATIC)                && (mState == STATE_NO_STATIC_DATA)                && (ret > 0)) {            // benign race with respect to other APIs that read mState            mState = STATE_INITIALIZED;        }        return ret;    }    public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,            @WriteMode int writeMode) {        if (mState == STATE_UNINITIALIZED) {            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");            return ERROR_INVALID_OPERATION;        }        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");            return ERROR_INVALID_OPERATION;        }        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");            return ERROR_BAD_VALUE;        }        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow                || (offsetInFloats + sizeInFloats > audioData.length)) {            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");            return ERROR_BAD_VALUE;        }        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,                writeMode == WRITE_BLOCKING);//通过JNI调用android_media_AudioTrack.cpp中函数(void *)android_media_AudioTrack_writeArray<jfloatArray>        if ((mDataLoadMode == MODE_STATIC)                && (mState == STATE_NO_STATIC_DATA)                && (ret > 0)) {            // benign race with respect to other APIs that read mState            mState = STATE_INITIALIZED;        }        return ret;    }
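
As a usage sketch of the blocking short[] path (assuming a track created in MODE_STREAM with ENCODING_PCM_16BIT and a pcm array of decoded samples; both names are illustrative, not from the article):

    short[] chunk = new short[1024];
    int offset = 0;
    track.play();
    while (offset < pcm.length) {
        int len = Math.min(chunk.length, pcm.length - offset);
        System.arraycopy(pcm, offset, chunk, 0, len);
        // WRITE_BLOCKING: returns only after the samples have been handed to the native track.
        int written = track.write(chunk, 0, len, AudioTrack.WRITE_BLOCKING);
        if (written < 0) break;   // ERROR_INVALID_OPERATION / ERROR_BAD_VALUE / ERROR_DEAD_OBJECT
        offset += written;
    }
    track.stop();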

3.3.2. android_media_AudioTrack_writeArray

template <typename T>    static jint android_media_AudioTrack_writeArray(JNIEnv *env, jobject thiz,                                                T javaAudioData,                                                jint offsetInSamples, jint sizeInSamples,                                                jint javaAudioFormat,                                                jboolean isWriteBlocking) {    //ALOGV("android_media_AudioTrack_writeArray(offset=%d, sizeInSamples=%d) called",    //        offsetInSamples, sizeInSamples);    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);    if (lpTrack == NULL) {        jniThrowException(env, "java/lang/IllegalStateException",            "Unable to retrieve AudioTrack pointer for write()");        return (jint)AUDIO_JAVA_INVALID_OPERATION;    }    if (javaAudioData == NULL) {        ALOGE("NULL java array of audio data to play");        return (jint)AUDIO_JAVA_BAD_VALUE;    }    // NOTE: We may use GetPrimitiveArrayCritical() when the JNI implementation changes in such    // a way that it becomes much more efficient. When doing so, we will have to prevent the    // AudioSystem callback to be called while in critical section (in case of media server    // process crash for instance)    // get the pointer for the audio data from the java array    auto cAudioData = envGetArrayElements(env, javaAudioData, NULL);    if (cAudioData == NULL) {        ALOGE("Error retrieving source of audio data to play");        return (jint)AUDIO_JAVA_BAD_VALUE; // out of memory or no data to load    }    jint samplesWritten = writeToTrack(lpTrack, javaAudioFormat, cAudioData,            offsetInSamples, sizeInSamples, isWriteBlocking == JNI_TRUE /* blocking */);    envReleaseArrayElements(env, javaAudioData, cAudioData, 0);    //ALOGV("write wrote %d (tried %d) samples in the native AudioTrack with offset %d",    //        (int)samplesWritten, (int)(sizeInSamples), (int)offsetInSamples);    return samplesWritten;    }    template <typename T>    static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,                         jint offsetInSamples, jint sizeInSamples, bool blocking) {    // give the data to the native AudioTrack object (the data starts at the offset)    ssize_t written = 0;    // regular write() or copy the data to the AudioTrack's shared memory?    size_t sizeInBytes = sizeInSamples * sizeof(T);    //如果是STATIC模式,sharedBuffer()返回不为空。如果是STREAM模式,sharedBuffer()返回为空。    if (track->sharedBuffer() == 0) {        written = track->write(data + offsetInSamples, sizeInBytes, blocking);        // for compatibility with earlier behavior of write(), return 0 in this case        if (written == (ssize_t) WOULD_BLOCK) {            written = 0;        }    } else {        // writing to shared memory, check for capacity        if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {            sizeInBytes = track->sharedBuffer()->size();        }        //在STATIC模式下,直接把数据memcpy到共享内存,记住在这种模式下要先调用write,后调用play。        memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);        written = sizeInBytes;    }    if (written >= 0) {        return written / sizeof(T);    }    return interpretWriteSizeError(written);}

3.4. Analysis of audioTrack.release()

Once all the data has been written, call stop() to stop playback, or call release() directly to free the related resources. Since release() and stop() are closely related, only the release() path is analyzed here.

3.4.1. release() in AudioTrack.java

    public void release() {
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();
        native_release(); // JNI call into android_media_AudioTrack.cpp: android_media_AudioTrack_release
        mState = STATE_UNINITIALIZED;
    }

    @Override
    protected void finalize() {
        baseRelease();
        native_finalize();
    }

    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }
        // stop playing
        synchronized(mPlayStateLock) {
            native_stop(); // JNI call into android_media_AudioTrack.cpp: android_media_AudioTrack_stop
            mPlayState = PLAYSTATE_STOPPED;
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
        }
    }
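
A brief application-side teardown sketch (illustrative, not from the article): in MODE_STREAM, stop() lets data already written play out, while pause() followed by flush() discards what is still queued; release() must come last and the object must not be used afterwards.

    void shutdown(AudioTrack track, boolean immediately) {
        if (immediately) {
            track.pause();   // halt playback right away
            track.flush();   // drop any queued, not-yet-played data (MODE_STREAM only)
        } else {
            track.stop();    // finish playing what was already written
        }
        track.release();     // free the native AudioTrack and its resources
    }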

3.4.2. android_media_AudioTrack_stop and android_media_AudioTrack_release

#define CALLBACK_COND_WAIT_TIMEOUT_MS 1000    static void android_media_AudioTrack_release(JNIEnv *env,  jobject thiz) {    sp<AudioTrack> lpTrack = setAudioTrack(env, thiz, 0);    if (lpTrack == NULL) {        return;    }    //ALOGV("deleting lpTrack: %x\n", (int)lpTrack);    // delete the JNI data    AudioTrackJniStorage* pJniStorage = (AudioTrackJniStorage *)env->GetLongField(        thiz, javaAudioTrackFields.jniData);    // reset the native resources in the Java object so any attempt to access    // them after a call to release fails.    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);    if (pJniStorage) {        Mutex::Autolock l(sLock);        audiotrack_callback_cookie *lpCookie = &pJniStorage->mCallbackData;        //ALOGV("deleting pJniStorage: %x\n", (int)pJniStorage);        while (lpCookie->busy) {            if (lpCookie->cond.waitRelative(sLock,                                            milliseconds(CALLBACK_COND_WAIT_TIMEOUT_MS)) !=                                                    NO_ERROR) {                break;            }        }        sAudioTrackCallBackCookies.remove(lpCookie);        // delete global refs created in native_setup        env->DeleteGlobalRef(lpCookie->audioTrack_class);        env->DeleteGlobalRef(lpCookie->audioTrack_ref);        delete pJniStorage;    }    }    static void    android_media_AudioTrack_stop(JNIEnv *env, jobject thiz)    {        sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);        if (lpTrack == NULL) {        jniThrowException(env, "java/lang/IllegalStateException",            "Unable to retrieve AudioTrack pointer for stop()");        return;    }       lpTrack->stop();    }

3.5. Summary of the AudioTrack (Java side) analysis

At the JNI layer the Java AudioTrack drives a native AudioTrack object. The flow of calls onto that native object is:
· new an AudioTrack using the parameterless constructor.
· Call set(), passing down the parameters from the Java layer and registering the audioCallback callback function.
· Call the AudioTrack's start().
· Call the AudioTrack's write().
· When the work is finished, call stop().
· Finally the native object is deleted (its reference count drops to zero).

4. AudioTrack analysis (native side)

The native AudioTrack implementation lives in frameworks/av/media/libmedia/AudioTrack.cpp; it is analyzed below.

4.1. The AudioTrack constructor

android_media_AudioTrack.cpp above contains the statement lpTrack = new AudioTrack(), which creates a native AudioTrack object. Its constructor is shown below.

    AudioTrack::AudioTrack()
        : mStatus(NO_INIT),
          mState(STATE_STOPPED),
          mPreviousPriority(ANDROID_PRIORITY_NORMAL),
          mPreviousSchedulingGroup(SP_DEFAULT),
          mPausedPosition(0),
          mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
    {
        mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
        mAttributes.usage = AUDIO_USAGE_UNKNOWN;
        mAttributes.flags = 0x0;
        strcpy(mAttributes.tags, "");
    }

4.2. The AudioTrack::set() call

status_t AudioTrack::set(
        audio_stream_type_t streamType,   // audio stream type
        uint32_t sampleRate,              // sample rate
        audio_format_t format,            // sample format
        audio_channel_mask_t channelMask, // output channel mask
        size_t frameCount,                // frame count
        audio_output_flags_t flags,       // output flags
        callback_t cbf,                   // callback function; if non-NULL it is called periodically
                                          // to supply new data and to notify marker/position updates, etc.
        void* user,                       // user argument passed to cbf
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,  // shared memory
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGD("set(): %p, streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
          this, streamType, sampleRate, format, channelMask, frameCount, flags,
          notificationFrames, sessionId, transferType, uid, pid);

    /// M: ALPS02266941: For Game Detection @{
#ifdef MTK_GAS_SERVICE_SUPPORT
    sp<GameDetectionThread> gameDetectionThread = new GameDetectionThread;
    gameDetectionThread->run("GameDetectionThread");
#endif
    /// @}

#ifdef MTK_AOSP_ENHANCEMENT
    if (AUDIO_STREAM_ENFORCED_AUDIBLE == streamType) {
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
        ALOGD("Camera not support fast !!");
    }
#endif

    mThreadCanCallJava = threadCanCallJava;

    /*
     * set() configures the audio parameters of this AudioTrack. The transferType parameter
     * selects how audio data is transferred; Android defines the following transfer modes:
     * enum transfer_type {
     *     TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
     *     TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
     *     TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
     *     TRANSFER_SYNC,      // synchronous write()
     *     TRANSFER_SHARED,    // shared memory
     * };
     */
    // Resolve the data transfer mode
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGD_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

#ifdef MTK_AOSP_ENHANCEMENT
    SLOGD("audiotrack %p set Type %d, rate %d, fmt %d, chn %d, fcnt %zu, flags %04x",
            this, streamType, sampleRate, format, channelMask, frameCount, flags);
#endif

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // Stream type setup
    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // Audio format setup
    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
#ifdef MTK_AOSP_ENHANCEMENT
        // AudioSystem Get HDMI capability
        // if Channel count is multi-channel
        if (channelMask != AUDIO_CHANNEL_OUT_STEREO && channelMask != AUDIO_CHANNEL_OUT_MONO) {
            // Check if HDMI is connected
            if (AudioSystem::getDeviceConnectionState(AUDIO_DEVICE_OUT_AUX_DIGITAL, "") == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
                int hdim_channelCount, hdmi_maxSampleRate, hdmi_bitwidth;
                hdim_channelCount = channelCount;
                // need to get AF HDMI capability.
                if (AudioSystem::getHDMICapability(&hdim_channelCount, &hdmi_bitwidth, &hdmi_maxSampleRate) != NO_ERROR) {
                    return BAD_VALUE;
                }
                ALOGD("GetHDMICapability hdim_channelCount %d, maxSampleRate%d, bitwidth %d", hdim_channelCount, hdmi_maxSampleRate, hdmi_bitwidth);
                // if AF channel count > 2
                if (hdim_channelCount > 2) {
                    // track is Multi-channel and HDMI support Multi-channel.
                    // raise for direct output.
                    flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
                    ALOGD("flags %d", flags);
                }
            } else {
                ALOGD("receive multi-channel content and not detecting aux digital");
            }
        }
#endif
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    // AudioTrack member initialization
    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    // cbf is the callback (audioCallback) passed down from the JNI layer; if the caller supplied
    // one, start an AudioTrackThread that will pull audio data from the client.
    mCbf = cbf;
    if (cbf != NULL) {
        ALOGD("set: Create AudioTrackThread");
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack (this is where createTrack_l is called)
    status_t status = createTrack_l();
    if (status != NO_ERROR) {
        ALOGD("set: createTrack_l fail! status = %d", status);
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;

    return NO_ERROR;
}
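The transfer-mode inference at the top of set() is worth calling out: when the caller passes TRANSFER_DEFAULT, the actual mode is derived from the other arguments. The following is a minimal standalone sketch of that decision; the enum values are copied from the code above, while resolveTransfer() is a hypothetical helper added here purely for illustration.

// Sketch only: mirrors the TRANSFER_DEFAULT branch of AudioTrack::set() above.
// resolveTransfer() is not part of the AOSP sources.
enum transfer_type {
    TRANSFER_DEFAULT,
    TRANSFER_CALLBACK,  // data pulled through the EVENT_MORE_DATA callback
    TRANSFER_OBTAIN,    // obtainBuffer()/releaseBuffer()
    TRANSFER_SYNC,      // blocking write()
    TRANSFER_SHARED,    // one shared buffer handed over up front (static mode)
};

transfer_type resolveTransfer(bool hasSharedBuffer, bool hasCallback, bool threadCanCallJava)
{
    if (hasSharedBuffer) {
        return TRANSFER_SHARED;      // a shared buffer always means static mode
    }
    if (!hasCallback || threadCanCallJava) {
        return TRANSFER_SYNC;        // Java-layer clients end up in write() mode
    }
    return TRANSFER_CALLBACK;        // native client with a callback: pull mode
}

This is why a Java AudioTrack created through the JNI path normally runs in TRANSFER_SYNC, while native users such as ToneGenerator, which pass a callback, get TRANSFER_CALLBACK.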

4.3、The AudioTrackThread thread

AudioTrack supports two ways of feeding data (both patterns are sketched right below):
(1) Push mode: the client actively calls write(); MediaPlayerService normally works this way.
(2) Pull mode: the AudioTrackThread thread actively pulls data from the client through the audioCallback callback; ToneGenerator uses this mode.
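To make the two modes concrete, here is a minimal native-side sketch. It is not taken from the sources: it assumes an already-created sp<AudioTrack>, glosses over error handling, and the function names pushPcm/audioCallback are only illustrative. Push mode loops over AudioTrack::write(); pull mode installs a callback that fills the buffer handed to it on EVENT_MORE_DATA.

#include <cstring>
#include <media/AudioTrack.h>

using namespace android;

// Push mode: the client drives the data flow with blocking write() calls.
void pushPcm(const sp<AudioTrack>& track, const int16_t* pcm, size_t bytes)
{
    track->start();
    size_t written = 0;
    while (written < bytes) {
        ssize_t n = track->write((const char*)pcm + written, bytes - written);
        if (n <= 0) break;                      // error or track torn down
        written += n;
    }
    track->stop();
}

// Pull mode: AudioTrackThread invokes this with EVENT_MORE_DATA and expects the
// client to fill info->size bytes of the supplied buffer.
void audioCallback(int event, void* /*user*/, void* info)
{
    if (event != AudioTrack::EVENT_MORE_DATA) return;
    AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
    // memset writes silence here; a real client would decode or copy PCM instead.
    memset(buffer->raw, 0, buffer->size);
}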

bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        ALOGV("AudioTrackThread::threadLoop, mPaused = %d, mIgnoreNextPausedInt = %d, mPausedInt = %d",
                mPaused, mIgnoreNextPausedInt, mPausedInt);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        ALOGD("AudioTrackThread::threadLoop exitPending");
        return false;
    }
    // Call processAudioBuffer() on the AudioTrack that owns this AudioTrackThread
    nsecs_t ns = mReceiver.processAudioBuffer();
    ALOGV("processAudioBuffer() returned %" PRId64, ns);
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}

4.4、createTrack_l

Playback needs AudioTrack to supply the audio data and AudioFlinger to do the mixing, so a data channel must be set up between the two. Since AudioTrack and AudioFlinger live in different process spaces, Android uses Binder IPC to bridge them.

status_t AudioTrack::createTrack_l()
{
    // Get the AudioFlinger proxy object
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

#ifdef MTK_AOSP_ENHANCEMENT
    // forbid fast track for voice feature
    String8 ret = AudioSystem::getParameters(String8("VoiceFeatureOn"));
    if (String8("VoiceFeatureOn=1") == ret) {
        mFlags = (audio_output_flags_t)(mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        ALOGV("remove direct flag flags for voice feature %d", mFlags);
    }
    // AudioSystem Get HDMI capability
    // if Channel count is multi-channel
    ALOGV("mChannelMask 0x%x", mChannelMask);
    if (mChannelMask != AUDIO_CHANNEL_OUT_STEREO && mChannelMask != AUDIO_CHANNEL_OUT_MONO) {
        // Check if HDMI is connected
        if (AudioSystem::getDeviceConnectionState(AUDIO_DEVICE_OUT_AUX_DIGITAL, "") == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
            int hdim_channelCount, hdmi_maxSampleRate, hdmi_bitwidth;
            hdim_channelCount = mChannelCount;
            // need to get AF HDMI capability.
            if (AudioSystem::getHDMICapability(&hdim_channelCount, &hdmi_bitwidth, &hdmi_maxSampleRate) != NO_ERROR) {
                return BAD_VALUE;
            }
            ALOGD("GetHDMICapability hdim_channelCount %d, maxSampleRate%d, bitwidth %d", hdim_channelCount, hdmi_maxSampleRate, hdmi_bitwidth);
            // if AF channel count > 2
            if (hdim_channelCount > 2) {
                // track is Multi-channel and HDMI support Multi-channel.
                // raise for direct output.
                mFlags = (audio_output_flags_t)(mFlags | AUDIO_OUTPUT_FLAG_DIRECT);
                ALOGD("flags %d", mFlags);
            } else {
                mFlags = (audio_output_flags_t)(mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
                ALOGD("remove direct flag flags %d", mFlags);
            }
        } else {
            ALOGD("receive multi-channel content and not detecting aux digital");
        }
    }
#endif

    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.
    status_t status;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           mSampleRate, mFormat, mChannelMask,
                                           mFlags, mSelectedDeviceId, mOffloadInfo);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    // Get the mixer thread's frame count
    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

#ifdef MTK_AOSP_ENHANCEMENT
    ALOGV("mAfLatency %d, mAfFrameCount %zu, mAfSampleRate %d", mAfLatency, mAfFrameCount, mAfSampleRate);
    if ((mAfFrameCount <= 0) || (mAfSampleRate <= 0)) {
        ALOGE("Get audioflinger parameter error afFrameCount-%zu, afSampleRate-%u", mAfFrameCount, mAfSampleRate);
        return NO_INIT;
    }
#endif

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
#ifndef MTK_AUDIO
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match
        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                    "track %u Hz, output %u Hz",
                    mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
#else
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match (relaxed in the MTK build)
        bool fastAllowed = useCaseAllowed;
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                    "track %u Hz, output %u Hz",
                    mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
#endif

    mNotificationFramesAct = mNotificationFramesReq;

#ifdef MTK_AOSP_ENHANCEMENT
    if (!strcmp(mAttributes.tags, "addr=CrossMount-Mic")) {
        mReqFrameCount = 4094;
        ALOGD("found mic mount");
    }
#endif
    size_t frameCount = mReqFrameCount;
    ALOGD("mReqFrameCount %zu", mReqFrameCount);
    if (!audio_has_proportional_frames(mFormat)) {
        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks the frame count calculations and checks are mostly done by server,
        // but we try to respect the application's request for notifications per buffer.
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            if (mNotificationsPerBufferReq > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                            mNotificationsPerBufferReq, afFrameCountHAL);
                } else {
                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
                }
            }
        } else {
            // for normal tracks precompute the frame count based on speed.
            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
            minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    speed /*, 0 mNotificationsPerBufferReq*/);
        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

#ifdef MTK_CROSSMOUNT_SUPPORT
    if (mFlags & AUDIO_OUTPUT_FLAG_TO_REMOTE_SUBMIX) {
        trackFlags |= IAudioFlinger::TRACK_REMOTE;
    }
#endif

#ifdef MTK_AOSP_ENHANCEMENT
    if (!strcmp(mAttributes.tags, "BootAnimationAudioTrack")) {
        ALOGD("mAttributes.tags = BootAnimationAudioTrack");
        trackFlags |= IAudioFlinger::TRACK_BOOT;
    }
#endif

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    audio_session_t originalSessionId = mSessionId;

    // Send the createTrack request to AudioFlinger. In stream mode sharedBuffer is 0 and output is
    // the handle obtained from AudioSystem::getOutput(), identifying the playback thread inside
    // AudioFlinger. The call returns an IAudioTrack (concretely a BpAudioTrack); all later
    // interaction between AudioTrack and AudioFlinger revolves around this IAudioTrack.
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // In stream mode no shared memory is created on the AudioTrack side; as noted earlier,
    // AudioTrack and AudioFlinger exchange data through shared memory, and that memory is
    // ultimately created by AudioFlinger's createTrack. The call below retrieves the shared
    // memory created by AudioFlinger.
    // FIXME compare to AudioRecord
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    // Save the Track proxy object and the anonymous shared memory proxy object into
    // this AudioTrack's member variables.
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    // IMemory::pointer() returns the base address of the shared memory as a void*; the
    // static_cast to audio_track_cblk_t* reflects that an audio_track_cblk_t object lives
    // at the head of that memory.
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    ALOGD("createTrack_l: %p, mCblk = %p", this, mCblk);

    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        size_t maxNotificationFrames;
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = afFrameCountHAL;
        } else {
            // For normal tracks, use at least double-buffering if no sample rate conversion,
            // or at least triple-buffering if there is sample rate conversion
            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
            maxNotificationFrames = frameCount / nBuffering;
        }
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
            if (mNotificationFramesAct == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                        maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
                        mNotificationFramesAct, maxNotificationFrames, frameCount);
            }
            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        // stream mode: buffers points at the data area, which starts right after the
        // audio_track_cblk_t at the head of the shared memory
        buffers = cblk + 1;
    } else {
        // static mode
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
    // FIXME don't believe this lie
    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
    ALOGD("createTrack_l: %p, mLatency = %d, mAfLatency = %d, frameCount = %zu, mSampleRate = %d",
            this, mLatency, mAfLatency, frameCount, mSampleRate);
    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

#ifdef MTK_AOSP_ENHANCEMENT
    gAudioTrackCenter.addTrack((intptr_t)mCblk, frameCount, mSampleRate, (void*)this, mAfFrameCount, mAfSampleRate, mReleased.value());
#endif

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output, streamType, mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
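A quick worked example of the numbers computed at the end of createTrack_l. The figures below are purely illustrative (not measured on the MT6737, and calculateMinFrameCount() is not reproduced here); they only exercise the latency formula mLatency = mAfLatency + (1000*frameCount)/mSampleRate and the double-buffering notification rule for a normal track whose rate matches the mixer rate.

#include <cstdio>
#include <cstdint>
#include <cstddef>

int main()
{
    const uint32_t afLatencyMs = 64;      // as returned by AudioSystem::getLatency() (assumed)
    const uint32_t sampleRate  = 44100;   // track rate == mixer rate, so nBuffering == 2
    const size_t   frameCount  = 3528;    // assumed minimum frame count (~80 ms of audio)

    const uint32_t trackLatencyMs     = afLatencyMs + (1000 * frameCount) / sampleRate;
    const size_t   notificationFrames = frameCount / 2;   // double-buffering sub-buffer size

    printf("latency %u ms, notify every %zu frames\n", trackLatencyMs, notificationFrames);
    // prints: latency 144 ms, notify every 1764 frames
    return 0;
}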

IAudioTrack is what ties AudioTrack and AudioFlinger together. In static mode the anonymous shared memory that holds the audio data is created on the AudioTrack side, whereas in stream mode it is created by AudioFlinger. The memory also differs between the two modes: in stream mode the head of the anonymous shared memory holds an audio_track_cblk_t object, which keeps the producer (AudioTrack) and the consumer (AudioFlinger) in step. createTrack itself simply creates a Track object inside AudioFlinger.
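The layout described above is exactly what the stream/static branch in createTrack_l relies on. The helper below is a recap sketch, not code from the sources: dataStart() is a hypothetical function, and it assumes the region returned by IAudioTrack::getCblk() and, in static mode, the client-created sharedBuffer.

#include <binder/IMemory.h>
#include <private/media/AudioTrackShared.h>   // audio_track_cblk_t

using namespace android;

// Illustrative helper: given the control-block region and the optional client-side
// sharedBuffer, return where the PCM data area starts, mirroring createTrack_l above.
static void* dataStart(const sp<IMemory>& cblkMemory, const sp<IMemory>& sharedBuffer)
{
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(cblkMemory->pointer());
    if (sharedBuffer == 0) {
        // stream mode: one region = [ audio_track_cblk_t | frameCount * frameSize bytes of PCM ]
        return cblk + 1;                 // FIFO begins right after the control block
    }
    // static mode: the PCM lives in the client-created sharedBuffer; the cblk region only
    // carries the control block that coordinates producer and consumer
    return sharedBuffer->pointer();
}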
