AudioQueue 音频队列 转

来源:互联网 发布:oracle 数据搜索引擎 编辑:程序博客网 时间:2024/05/16 11:01

转自 http://my.chinaunix.net/space.php?uid=25788167&do=blog&id=366155

AudioQueue是Mac OS X与iPhone中提供录音、播放功能的高级框架,比AudioUnit等框架更方便,而且不要求掌握更多专门的知识。

从AudioQueue的名称就可以看出,AudioQueue框架以队列的形式处理音频数据。因此使用时需要给队列分配缓存空间,由回调(Callback)函数完成向队列缓存读写音频数据的功能。另外,AudioQueue是AudioToolbox框架的一部分,使用前需要将AudioToolbox框架导入进来。

使用AudioQueue实现音频播放功能时,最主要的步骤可以更简练地归纳如下。

1. 打开播放音频文件

2. 取得播放音频文件的数据格式

3. 准备播放用的队列

4. 将缓冲中的数据移动到队列中

5. 开始播放

6. 在回调函数中进行队列处理

以下是贯彻上述六个主要步骤的代码实例,只需要向[play:]中传入音频文件的路径就可以开始音频播放。稍加修改可以直接应用到自己的程序中。

Source Audioplay.h

#import <Foundation/Foundation.h>

#import <AudioToolbox/AudioToolbox.h>

#import <AudioToolbox/AudioFile.h>

 

#define NUM_BUFFERS 3

 

@interface AudioPlayer : NSObject {

        // ID of the audio file being played (opened in -play:).

    AudioFileID audioFile;

        // Stream description (sample rate, channels, codec) read from the file.

    AudioStreamBasicDescription dataFormat;

        // Output audio queue that drives playback.

    AudioQueueRef queue;

    // Index of the next packet to read from the file (advanced on every read).
    SInt64 packetIndex;

    // How many packets to read per buffer fill (computed in -play: from the format).
    UInt32 numPacketsToRead;

    // NOTE(review): never written or read in the shown implementation — appears unused.
    UInt32 bufferByteSize;

    // Packet descriptions for VBR formats; NULL for CBR formats.
    AudioStreamPacketDescription *packetDescs;

    // The queue's buffers; NUM_BUFFERS of them are allocated and primed in -play:.
    AudioQueueBufferRef buffers[NUM_BUFFERS];

}

// The playback queue, exposed as a property.

@property AudioQueueRef queue;

// Opens the file at `path` and starts playback.

- (void) play:(CFURLRef) path;

// Refills a finished buffer with the next packets and re-enqueues it.

- (void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue

                       queueBuffer:(AudioQueueBufferRef)audioQueueBuffer;

// Audio-queue output callback; forwards to -audioQueueOutputWithQueue:queueBuffer:.
// NOTE(review): declaring a static C function inside an @interface is unusual;
// the declaration belongs in the .m before @implementation.

static void BufferCallback(void *inUserData, AudioQueueRef inAQ,

                                            AudioQueueBufferRef buffer);

// Reads up to numPacketsToRead packets into `buffer` and enqueues it;
// returns the number of packets read (0 at end of file).

- (UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer;

 

@end

 

Source Audioplay.m

// Size of each audio queue buffer: 64 KB.
static UInt32 gBufferSizeBytes = 0x10000;

@implementation AudioPlayer

@synthesize queue;

// Audio-queue output callback: invoked by the system whenever a buffer has
// finished playing and can be refilled. inUserData is the AudioPlayer
// instance registered in AudioQueueNewOutput.
static void BufferCallback(void *inUserData, AudioQueueRef inAQ,
                           AudioQueueBufferRef buffer) {
    AudioPlayer *player = (AudioPlayer *)inUserData;
    [player audioQueueOutputWithQueue:inAQ queueBuffer:buffer];
}

// Designated initializer.
// FIX: the original skipped [super init] and enqueued the still-NULL
// buffers[] into the still-NULL queue — both are only created later in
// -play:, so that loop was at best a no-op and at worst undefined
// behavior. All queue setup now happens in -play: where it belongs.
- (id)init {
    self = [super init];
    if (self) {
        packetIndex = 0;
        packetDescs = NULL;
    }
    return self;
}

// Refills one finished buffer with the next packets from the file and
// re-enqueues it. Runs on the queue's internal thread via BufferCallback.
- (void)audioQueueOutputWithQueue:(AudioQueueRef)audioQueue
                      queueBuffer:(AudioQueueBufferRef)audioQueueBuffer {
    UInt32 numBytes;
    UInt32 numPackets = numPacketsToRead;

    // Read the next run of packets starting at packetIndex.
    OSStatus status = AudioFileReadPackets(audioFile, false, &numBytes,
                                           packetDescs, packetIndex,
                                           &numPackets,
                                           audioQueueBuffer->mAudioData);

    if (status == noErr && numPackets > 0) {
        // Tell the queue how much valid audio the buffer now holds.
        audioQueueBuffer->mAudioDataByteSize = numBytes;

        // FIX: pass 0 packet descriptions for CBR formats (packetDescs is
        // NULL there). The original passed numPackets unconditionally,
        // which is invalid with a NULL inPacketDescs — and inconsistent
        // with -readPacketsIntoBuffer: below, which did it correctly.
        AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer,
                                (packetDescs ? numPackets : 0), packetDescs);

        packetIndex += numPackets;
    } else {
        // FIX: end of file — let already-enqueued audio drain, then stop.
        // The original silently dropped the buffer, leaving the queue to
        // starve without ever stopping.
        AudioQueueStop(audioQueue, false);
    }
}

// Opens the file at `path`, creates an output queue matching the file's
// data format, primes NUM_BUFFERS buffers, and starts playback.
- (void)play:(CFURLRef)path {
    UInt32 size;
    UInt32 maxPacketSize;
    OSStatus status;

    status = AudioFileOpenURL(path, kAudioFileReadPermission, 0, &audioFile);
    if (status != noErr) {
        // TODO(review): surface the failure to the caller (NSError/delegate).
        return;
    }

    // Fetch the stream description so the queue is created with the file's
    // own sample rate / channel count / codec.
    // FIX: check the status before trusting dataFormat (original ignored it).
    size = sizeof(dataFormat);
    status = AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat,
                                  &size, &dataFormat);
    if (status != noErr) {
        AudioFileClose(audioFile);
        return;
    }

    // FIX: verify queue creation succeeded before using `queue`; pass NULL
    // (not nil) for the C pointer run-loop parameters.
    status = AudioQueueNewOutput(&dataFormat, BufferCallback, self,
                                 NULL, NULL, 0, &queue);
    if (status != noErr) {
        AudioFileClose(audioFile);
        return;
    }

    if (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0) {
        // VBR: size each read by the largest possible packet and allocate
        // packet descriptions so the queue can find packet boundaries.
        size = sizeof(maxPacketSize);
        AudioFileGetProperty(audioFile, kAudioFilePropertyPacketSizeUpperBound,
                             &size, &maxPacketSize);
        if (maxPacketSize > gBufferSizeBytes) {
            maxPacketSize = gBufferSizeBytes;
        }
        numPacketsToRead = gBufferSizeBytes / maxPacketSize;
        packetDescs = malloc(sizeof(AudioStreamPacketDescription) * numPacketsToRead);
        // NOTE(review): packetDescs is never freed — leaks if -play: is
        // called repeatedly; free it when playback ends or in dealloc.
    } else {
        // CBR: fixed packet size, no packet descriptions needed.
        numPacketsToRead = gBufferSizeBytes / dataFormat.mBytesPerPacket;
        packetDescs = NULL;
    }

    // Hand the file's magic cookie (codec configuration data) to the queue;
    // required for formats such as AAC.
    status = AudioFileGetPropertyInfo(audioFile,
                                      kAudioFilePropertyMagicCookieData,
                                      &size, NULL);
    if (status == noErr && size > 0) {
        char *cookie = malloc(size);
        AudioFileGetProperty(audioFile, kAudioFilePropertyMagicCookieData,
                             &size, cookie);
        AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie,
                              cookie, size);
        free(cookie);
    }

    // Prime the queue: allocate each buffer and pre-fill it with packets.
    // Stop early if the file holds fewer than NUM_BUFFERS buffers of audio.
    packetIndex = 0;
    for (int i = 0; i < NUM_BUFFERS; i++) {
        AudioQueueAllocateBuffer(queue, gBufferSizeBytes, &buffers[i]);
        if ([self readPacketsIntoBuffer:buffers[i]] == 0) {
            break;
        }
    }

    // Full volume.
    Float32 gain = 1.0;
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, gain);

    // Start playback; the system now drives BufferCallback automatically.
    AudioQueueStart(queue, NULL);
}

// Reads up to numPacketsToRead packets into `buffer` and enqueues it.
// Returns the number of packets actually read (0 at end of file).
- (UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer {
    UInt32 numBytes;
    UInt32 numPackets = numPacketsToRead;

    AudioFileReadPackets(audioFile, false, &numBytes, packetDescs,
                         packetIndex, &numPackets, buffer->mAudioData);
    if (numPackets > 0) {
        buffer->mAudioDataByteSize = numBytes;
        // packetDescs is NULL for CBR data, so pass 0 descriptions then.
        AudioQueueEnqueueBuffer(queue, buffer,
                                (packetDescs ? numPackets : 0), packetDescs);
        packetIndex += numPackets;
    }
    return numPackets;
}

@end

0 0
原创粉丝点击