WebRTC Audio Engine - Linux Demo


After Google acquired GIPS, a well-known audio technology company, it built the WebRTC Voice Engine (its speech-processing engine) on top of GIPS's powerful audio technology. This article introduces the audio-related implementation inside the WebRTC Voice Engine and, through a concrete example, shows how to use it to build your own VoIP audio-processing engine.

This article focuses on setting up a WebRTC VoiceEngine demo on Linux that you can build and debug yourself.
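Both demos below follow the same basic VoiceEngine lifecycle: create the engine, obtain the sub-API interfaces you need, initialize, create a channel, start the receive/playout or send path, and finally tear everything down. As a quick orientation, here is a minimal sketch that uses only the calls appearing in the demos; no audio flows in this skeleton and error handling is omitted:

#include "voe_base.h"

using namespace webrtc;

int main()
{
    // 1. Create the engine and obtain the base sub-API
    VoiceEngine* voe = VoiceEngine::Create();
    VoEBase* base = VoEBase::GetInterface(voe);

    // 2. Initialize and create an audio channel
    base->Init();
    int ch = base->CreateChannel();

    // ... StartReceive/StartPlayout (receiver) or StartSend (sender) on ch ...

    // 3. Tear down in reverse order
    base->DeleteChannel(ch);
    base->Terminate();
    base->Release();
    VoiceEngine::Delete(voe);
    return 0;
}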

1.VoiceEngine Demo Directory Tree

Below is the directory tree of a small VoiceEngine project:

.
├── include
│   ├── channel_transport.h
│   ├── common_types.h
│   ├── typedefs.h
│   ├── udp_transport.h
│   ├── voe_audio_processing.h
│   ├── voe_base.h
│   ├── voe_call_report.h
│   ├── voe_codec.h
│   ├── voe_dtmf.h
│   ├── voe_encryption.h
│   ├── voe_errors.h
│   ├── voe_external_media.h
│   ├── voe_file.h
│   ├── voe_hardware.h
│   ├── voe_neteq_stats.h
│   ├── voe_network.h
│   ├── voe_rtp_rtcp.h
│   ├── voe_video_sync.h
│   └── voe_volume_control.h
├── lib
│   ├── libaudio_coding_module.a
│   ├── libaudio_conference_mixer.a
│   ├── libaudio_device.a
│   ├── libaudioproc_debug_proto.a
│   ├── libaudio_processing.a
│   ├── libaudio_processing_sse2.a
│   ├── libchannel_transport.a
│   ├── libCNG.a
│   ├── libcommon_video.a
│   ├── libG711.a
│   ├── libG722.a
│   ├── libgtest.a
│   ├── libgtest_main.a
│   ├── libiLBC.a
│   ├── libiSAC.a
│   ├── libiSACFix.a
│   ├── libmedia_file.a
│   ├── libNetEq.a
│   ├── libopus.a
│   ├── libpaced_sender.a
│   ├── libPCM16B.a
│   ├── libprotobuf_lite.a
│   ├── libresampler.a
│   ├── librtp_rtcp.a
│   ├── libsignal_processing.a
│   ├── libsystem_wrappers.a
│   ├── libvad.a
│   ├── libvoice_engine_core.a
│   ├── libwebrtc_opus.a
│   └── libwebrtc_utility.a
├── Makefile
├── out
│   └── Debug
│       ├── client_recv
│       └── client_send
└── src
    ├── client_recv.cpp
    └── client_send.cpp

The client_send and client_recv programs under src are two demos built on the WebRTC VoiceEngine: one sends audio data and the other receives it.

2.Project Makefile

Below is the Makefile used to build the VoiceEngine project:

# WebRTC VoiceEngine Test => Makefile
CC = g++
CFLAGS = -Wall -g
VPATH = src:include
lib = -L lib
obj = out/Debug/client_send out/Debug/client_recv
depens = -lvoice_engine_core -laudio_device -lresampler \
        -laudio_conference_mixer \
        -laudio_processing \
        -laudio_coding_module -lrtp_rtcp \
        -lNetEq -lCNG -lG722 -liLBC \
        -lG711 -liSAC -lPCM16B \
        -lsignal_processing \
        -lvad -laudioproc_debug_proto \
        -lprotobuf_lite -laudio_processing_sse2 \
        -lwebrtc_opus -lopus -lpaced_sender \
        -liSACFix -lmedia_file \
        -lwebrtc_utility -lchannel_transport -lgtest \
        -lpthread -lsystem_wrappers -lrt -ldl

all: ${obj}

out/Debug/client_send: client_send.cpp
	${CC} ${CFLAGS} -o $@ $< -Iinclude ${lib} ${depens}

out/Debug/client_recv: client_recv.cpp
	${CC} ${CFLAGS} -o $@ $< -Iinclude ${lib} ${depens}

.PHONY: clean
clean:
	rm -rf *.o ${obj}
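With the headers and prebuilt static libraries laid out as in the directory tree above, running make from the project root produces the two demo binaries out/Debug/client_send and out/Debug/client_recv; make clean removes them again.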


Note that the link order of the static libraries cannot be changed arbitrarily, because the libraries depend on one another: the GNU linker resolves symbols in static libraries from left to right, so each library must appear after the libraries that reference its symbols. (If keeping the order straight by hand becomes tedious, wrapping the library list in -Wl,--start-group ... -Wl,--end-group makes the linker rescan the group until no further symbols can be resolved.)

3.client_recv Demo

/*
 *  WebRTC VoiceEngine Test => client_recv
 *
 *  @date: 13.06.2013
 *  @author: hongliang
 *  @mail: lhl_nciae@sina.cn
 */
#include <iostream>
#include "voe_base.h"
#include "voe_network.h"
#include "voe_hardware.h"
#include "voe_errors.h"
#include "channel_transport.h"

using namespace webrtc;

int main(int argc, char *argv[])
{
    // Create VoiceEngine
    VoiceEngine* voe = VoiceEngine::Create();

    // Init base
    VoEBase* base = VoEBase::GetInterface(voe);
    base->Init();

    // Hardware: enumerate recording devices
    VoEHardware* hardware = VoEHardware::GetInterface(voe);
    int nRec = 0;
    char devName[128] = {0};
    char guidName[128] = {0};
    int ret = 0;
    ret = hardware->GetNumOfRecordingDevices(nRec);
    if (ret != 0) {
        std::cout << "GetNumOfRecordingDevices error:" << base->LastError() << std::endl;
    }
    for (int idx = 0; idx < nRec; idx++) {
        hardware->GetRecordingDeviceName(idx, devName, guidName);
        std::cout << "GetRecordingDeviceName=> " << "name:" << devName
                  << " guidname:" << guidName << std::endl;
    }

    // Create channel
    int ch = base->CreateChannel();
    if (ch != -1) {
        std::cout << "Create channel #" << ch << std::endl;
    }

    // Create voice channel transport
    VoENetwork* voe_network = VoENetwork::GetInterface(voe);
    test::VoiceChannelTransport voe_vct(voe_network, ch);

    // Receive: listen on UDP port 12345 and start playout
    voe_vct.SetLocalReceiver(12345);
    base->StartReceive(ch);
    base->StartPlayout(ch);
    std::cout << "Start receive from channel:" << ch << std::endl;

    // Wait here while audio is received and played; press Enter to stop
    // and run the cleanup below
    std::cin.get();

    // Release resources
    base->DeleteChannel(ch);
    base->Terminate();
    base->Release();
    hardware->Release();
    voe_network->Release();
    VoiceEngine::Delete(voe);
    return 0;
}
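A side note: client_recv only plays audio out, yet the code above enumerates the recording devices. If you also want to list the playback devices, VoEHardware offers analogous calls. A minimal sketch, meant to be dropped into the demo above (it reuses the same hardware and base pointers):

// List playout (playback) devices via the same VoEHardware interface
int nPlay = 0;
char playName[128] = {0};
char playGuid[128] = {0};
if (hardware->GetNumOfPlayoutDevices(nPlay) != 0) {
    std::cout << "GetNumOfPlayoutDevices error:" << base->LastError() << std::endl;
}
for (int idx = 0; idx < nPlay; idx++) {
    hardware->GetPlayoutDeviceName(idx, playName, playGuid);
    std::cout << "GetPlayoutDeviceName=> name:" << playName
              << " guidname:" << playGuid << std::endl;
}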


4.client_send Demo

#include <iostream>
#include "voe_base.h"
#include "voe_network.h"
#include "voe_hardware.h"
#include "voe_errors.h"
#include "voe_rtp_rtcp.h"
#include "channel_transport.h"

using namespace webrtc;

int main(int argc, char *argv[])
{
    int ret;

    // Create VoiceEngine
    VoiceEngine *voe = VoiceEngine::Create();

    // Init base
    VoEBase* base = VoEBase::GetInterface(voe);
    base->Init();

    // Hardware: enumerate recording devices
    int nRec = 0;
    char devName[128] = {0};
    char guidName[128] = {0};
    VoEHardware* hardware = VoEHardware::GetInterface(voe);
    hardware->GetNumOfRecordingDevices(nRec);
    std::cout << "Get num of recording devices:" << nRec << std::endl;
    for (int idx = 0; idx < nRec; idx++) {
        hardware->GetRecordingDeviceName(idx, devName, guidName);
        std::cout << "GetRecordingName(" << idx << ")  " << "name:" << devName
                  << "  guidName:" << guidName << std::endl;
    }

    // Create channel
    int ch = base->CreateChannel();
    if (ch == -1) {
        std::cout << "create channel error:" << base->LastError() << std::endl;
        return -1;
    }
    std::cout << "create channel#" << ch << std::endl;

    // Create voice channel transport
    VoENetwork* voe_network = VoENetwork::GetInterface(voe);
    test::VoiceChannelTransport voe_ctp(voe_network, ch);

    // Send: capture from the default recording device and send to 192.168.1.1:12345
    voe_ctp.SetSendDestination("192.168.1.1", 12345);
    //base->SetSendDestination(ch, "192.168.1.1", 12345);
    ret = base->StartSend(ch);
    if (ret == -1) {
        std::cout << "Start send error:" << base->LastError() << std::endl;
        return -1;
    }
    std::cout << "Start send on channel#" << ch << std::endl;

    // Keep capturing and sending until the user presses Enter
    std::cin.get();

    // Release resources
    base->DeleteChannel(ch);
    base->Terminate();
    base->Release();
    hardware->Release();
    voe_network->Release();
    VoiceEngine::Delete(voe);
    return 0;
}
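To test the two demos together, start client_recv on the receiving machine first (it listens on UDP port 12345 and plays out whatever arrives), then start client_send on the sending machine after replacing 192.168.1.1 in SetSendDestination() with the receiver's actual IP address. Press Enter in either program to stop it and release the engine.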
