An audio recording example using WebSocket


Reference: An introduction to HTML5 WebSocket

Chrome supports voice chat. Below is a recording example for Chrome:

<!DOCTYPE HTML>
<html lang="en">
    <head>
        <meta charset="utf-8"/>
        <title>Chat by Web Sockets</title>
        <script type="text/javascript" src="js/recorder.js"></script>
        <script type="text/javascript" src="js/jquery-1.10.1.min.js"></script>
         
        <style type='text/css'>
            
        </style>
    </head>
    <body>
        <audio id="audio" controls autoplay></audio>
        
       <input type="button" id="record" value="Record">
       <input type="button" id="export" value="Export">
       <div id="message"></div>
    </body>
     
    <script type='text/javascript'>
            var onFail = function(e) {
                console.log('Rejected!', e);
            };
         
            var onSuccess = function(s) {
                var context = new webkitAudioContext();
                var mediaStreamSource = context.createMediaStreamSource(s);
                rec = new Recorder(mediaStreamSource);
                //rec.record();
         
                // audio loopback
                // mediaStreamSource.connect(context.destination);
            }
         
            //window.URL = URL || window.URL || window.webkitURL;
            navigator.getUserMedia  = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
         
            var rec;
            var audio = document.querySelector('#audio');
         
            function startRecording() {
                if (navigator.getUserMedia) {
                    navigator.getUserMedia({audio: true}, onSuccess, onFail);
                } else {
                    console.log('navigator.getUserMedia not present');
                }
            }
            startRecording();
            //--------------------     
            $('#record').click(function() {
                rec.record();
                ws.send("start");
                $("#message").text("Click export to stop recording");
     
                // export a wav every 3 seconds, so we can send it using websockets
                intervalKey = setInterval(function() {
                    rec.exportWAV(function(blob) {
                         
                        rec.clear();
                        ws.send(blob);
                        //audio.src = URL.createObjectURL(blob);
                    });
                }, 3000);
            });
             
            $('#export').click(function() {
                // first send the stop command
                rec.stop();
                ws.send("stop");
                clearInterval(intervalKey);
                 
                ws.send("analyze");
                $("#message").text("");
            });
             
            var ws = new WebSocket("ws://127.0.0.1:8088/websocket/servlet/record");
            ws.onopen = function () {
                console.log("Openened connection to websocket");
            };
            ws.onclose = function (){
                 console.log("Close connection to websocket");
            }
            ws.onmessage = function(e) {
                audio.src = URL.createObjectURL(e.data);
            }
             
            
        </script>
</html>
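
The page above relies on the prefixed APIs Chrome exposed at the time (webkitAudioContext and the callback-style navigator.webkitGetUserMedia). In current browsers the capture step is promise-based. The following is only a minimal sketch of the modern equivalent, not part of the original example; startRecordingModern is an illustrative name, and it assumes the same Recorder class from recorder.js is loaded:

// Sketch only: promise-based equivalent of startRecording().
// Assumes recorder.js is loaded and exposes window.Recorder as above.
function startRecordingModern() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(function(stream) {
            var context = new (window.AudioContext || window.webkitAudioContext)();
            var mediaStreamSource = context.createMediaStreamSource(stream);
            window.rec = new Recorder(mediaStreamSource);   // same global rec as in the page
        })
        .catch(function(err) {
            console.log('getUserMedia rejected:', err);
        });
}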

Contents of recorder.js:

(function(window){
 
  var WORKER_PATH = 'js/recorderWorker.js';
 
  var Recorder = function(source, cfg){
    var config = cfg || {};
    var bufferLen = config.bufferLen || 4096;
    this.context = source.context;
    this.node = this.context.createJavaScriptNode(bufferLen, 2, 2);
    var worker = new Worker(config.workerPath || WORKER_PATH);
    worker.postMessage({
      command: 'init',
      config: {
        sampleRate: this.context.sampleRate
      }
    });
    var recording = false,
      currCallback;
 
    this.node.onaudioprocess = function(e){
      if (!recording) return;
      worker.postMessage({
        command: 'record',
        buffer: [
          e.inputBuffer.getChannelData(0),
          e.inputBuffer.getChannelData(1)
        ]
      });
    }
 
    this.configure = function(cfg){
      for (var prop in cfg){
        if (cfg.hasOwnProperty(prop)){
          config[prop] = cfg[prop];
        }
      }
    }
 
    this.record = function(){
      recording = true;
    }
 
    this.stop = function(){
      recording = false;
    }
 
    this.clear = function(){
      worker.postMessage({ command: 'clear' });
    }
 
    this.getBuffer = function(cb) {
      currCallback = cb || config.callback;
      worker.postMessage({ command: 'getBuffer' })
    }
 
    this.exportWAV = function(cb, type){
      currCallback = cb || config.callback;
      type = type || config.type || 'audio/wav';
      if (!currCallback) throw new Error('Callback not set');
      worker.postMessage({
        command: 'exportWAV',
        type: type
      });
    }
 
    worker.onmessage = function(e){
      var blob = e.data;
      currCallback(blob);
    }
 
    source.connect(this.node);
    this.node.connect(this.context.destination);   // this should not be necessary
  };
 
  Recorder.forceDownload = function(blob, filename){
    var url = (window.URL || window.webkitURL).createObjectURL(blob);
    var link = window.document.createElement('a');
    link.href = url;
    link.download = filename || 'output.wav';
    var click = document.createEvent("Event");
    click.initEvent("click", true, true);
    link.dispatchEvent(click);
  }
 
  window.Recorder = Recorder;
 
})(window);
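
One caveat on the code above: createJavaScriptNode is the legacy WebKit name of the processing node factory; newer browsers expose it as createScriptProcessor with the same (bufferSize, inputChannels, outputChannels) arguments. A hedged compatibility tweak, not part of the original recorder.js, could replace the node-creation line like this:

// Sketch only: prefer the standardized factory name when the browser provides it.
// createJavaScriptNode is the legacy WebKit name used in recorder.js above.
var factory = this.context.createScriptProcessor || this.context.createJavaScriptNode;
this.node = factory.call(this.context, bufferLen, 2, 2);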

 

Contents of recorderWorker.js:

var recLength = 0,
  recBuffersL = [],
  recBuffersR = [],
  sampleRate;
 
this.onmessage = function(e){
  switch(e.data.command){
    case 'init':
      init(e.data.config);
      break;
    case 'record':
      record(e.data.buffer);
      break;
    case 'exportWAV':
      exportWAV(e.data.type);
      break;
    case 'getBuffer':
      getBuffer();
      break;
    case 'clear':
      clear();
      break;
  }
};
 
function init(config){
  sampleRate = config.sampleRate;
}
 
function record(inputBuffer){
  recBuffersL.push(inputBuffer[0]);
  recBuffersR.push(inputBuffer[1]);
  recLength += inputBuffer[0].length;
}
 
function exportWAV(type){
  var bufferL = mergeBuffers(recBuffersL, recLength);
  var bufferR = mergeBuffers(recBuffersR, recLength);
  var interleaved = interleave(bufferL, bufferR);
  var dataview = encodeWAV(interleaved);
  var audioBlob = new Blob([dataview], { type: type });
 
  this.postMessage(audioBlob);
}
 
function getBuffer() {
  var buffers = [];
  buffers.push( mergeBuffers(recBuffersL, recLength) );
  buffers.push( mergeBuffers(recBuffersR, recLength) );
  this.postMessage(buffers);
}
 
function clear(){
  recLength = 0;
  recBuffersL = [];
  recBuffersR = [];
}
 
function mergeBuffers(recBuffers, recLength){
  var result = new Float32Array(recLength);
  var offset = 0;
  for (var i = 0; i < recBuffers.length; i++){
    result.set(recBuffers[i], offset);
    offset += recBuffers[i].length;
  }
  return result;
}
 
function interleave(inputL, inputR){
  var length = inputL.length + inputR.length;
  var result = new Float32Array(length);
 
  var index = 0,
    inputIndex = 0;
 
  while(index < length){
    result[index++] = inputL[inputIndex];
    result[index++] = inputR[inputIndex];
    inputIndex++;
  }
  return result;
}
 
function floatTo16BitPCM(output, offset, input){
  for (var i = 0; i < input.length; i++, offset += 2){
    var s = Math.max(-1, Math.min(1, input[i]));
    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
  }
}
 
function writeString(view, offset, string){
  for (var i = 0; i < string.length; i++){
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}
 
function encodeWAV(samples){
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);
 
  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* RIFF chunk length: 36 + data chunk length for a canonical 44-byte header */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw PCM) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, 2, true);
  /* sample rate */
  view.setUint32(24, sampleRate, true);
  /* byte rate (sample rate * block align) */
  view.setUint32(28, sampleRate * 4, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, 4, true);
  /* bits per sample */
  view.setUint16(34, 16, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);
 
  floatTo16BitPCM(view, 44, samples);
 
  return view;
}
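
The 44-byte header produced by encodeWAV follows the canonical PCM WAV layout: a RIFF/WAVE header, a 16-byte 'fmt ' chunk describing 16-bit stereo PCM at the capture sample rate, and then the raw 'data' chunk. As a quick sanity check, a small sketch like the following (not part of the original example; inspectWavHeader is an illustrative helper and it assumes a browser with Blob.arrayBuffer support) can read the header fields back from an exported blob:

// Sketch: read the first 44 bytes of an exported blob and log the header fields.
function inspectWavHeader(blob) {
  blob.slice(0, 44).arrayBuffer().then(function(buf) {
    var v = new DataView(buf);
    var tag = function(off) {
      return String.fromCharCode(v.getUint8(off), v.getUint8(off + 1),
                                 v.getUint8(off + 2), v.getUint8(off + 3));
    };
    console.log(tag(0), tag(8), tag(12), tag(36));          // "RIFF WAVE fmt  data"
    console.log('channels:', v.getUint16(22, true));        // 2
    console.log('sample rate:', v.getUint32(24, true));     // e.g. 44100 or 48000
    console.log('bits per sample:', v.getUint16(34, true)); // 16
  });
}

// usage, e.g. inside rec.exportWAV's callback: inspectWavHeader(blob);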

 

Backend WebSocket code:

package com.test;
 
 
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.SequenceInputStream;
 
import javax.servlet.http.HttpServletRequest;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
 
 
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.nio.SelectChannelConnector;
import org.eclipse.jetty.websocket.WebSocket;
import org.eclipse.jetty.websocket.WebSocketHandler;
 
 
 
public class TestRecordServlet extends Server {
     
     
    private final static Log LOG = LogFactory.getLog(TestRecordServlet.class);
      
    public TestRecordServlet(int port) {
        SelectChannelConnector connector = new SelectChannelConnector();
        connector.setPort(port);
        addConnector(connector);
  
        WebSocketHandler wsHandler = new WebSocketHandler() {
            public WebSocket doWebSocketConnect(HttpServletRequest request, String protocol) {
                return new FaceDetectWebSocket();
            }
        };
        setHandler(wsHandler);
    }
  
    /**
     * Simple inner class that is used to handle websocket connections.
     *
     * @author jos
     */
    private static class FaceDetectWebSocket implements WebSocket,
            WebSocket.OnBinaryMessage, WebSocket.OnTextMessage {
        private String currentCommand = "";
         
        private Connection connection;
        //private FaceDetection faceDetection = new FaceDetection();
  
        public FaceDetectWebSocket() {
            super();
        }
  
        /**
         * On open we set the connection locally, and enable
         * binary support
         */
        public void onOpen(Connection connection) {
            this.connection = connection;
            this.connection.setMaxBinaryMessageSize(1024 * 512);
        }
  
        /**
         * Cleanup if needed. Not used for this example
         */
        public void onClose(int code, String message) {}
  
        /**
         * When we receive a binary message we treat it as a chunk of recorded
         * WAV audio and append it to the temporary out.wav file.
         */
        public void onMessage(byte[] data, int offset, int length) {
  
            if (currentCommand.equals("start")) {
                try {
                    // The temporary file that contains our captured audio stream
                    File f = new File("out.wav");
  
                    // if the file already exists we append to it.
                    if (f.exists()) {
                        LOG.info("Adding received block to existing file.");
  
                        // two clips are used to concatenate the data
                        AudioInputStream clip1 = AudioSystem.getAudioInputStream(f);
                        AudioInputStream clip2 = AudioSystem.getAudioInputStream(new ByteArrayInputStream(data));
  
                        // use a SequenceInputStream to concatenate them
                        AudioInputStream appendedFiles =
                                new AudioInputStream(
                                    new SequenceInputStream(clip1, clip2),
                                    clip1.getFormat(),
                                    clip1.getFrameLength() + clip2.getFrameLength());
  
                        // write out the output to a temporary file
                        AudioSystem.write(appendedFiles,
                                AudioFileFormat.Type.WAVE,
                                new File("out2.wav"));
  
                        // rename the files and delete the old one
                        File f1 = new File("out.wav");
                        File f2 = new File("out2.wav");
                        f1.delete();
                        f2.renameTo(new File("out.wav"));
                    } else {
                        LOG.info("Starting new recording.");
                        FileOutputStream fOut = new FileOutputStream("out.wav", true);
                        fOut.write(data);
                        fOut.close();
                    }
                } catch (Exception e) {
                    LOG.error("Failed to append received audio block: " + e);
                }
            }
        }
  
        public void onMessage(String data) {
            if (data.startsWith("start")) {
                // before we start we cleanup anything left over
                //cleanup();
                currentCommand = "start";
            } else if (data.startsWith("stop")) {
                currentCommand = "stop";
            } else if (data.startsWith("clear")) {
                // just remove the current recording
                //cleanup();
            } else if (data.startsWith("analyze")) {
                 
            }
        }
    }
  
    /**
     * Start the server. The port must match the one used in the page's
     * WebSocket URL (ws://127.0.0.1:8088/...).
     */
    public static void main(String[] args) throws Exception {
        TestRecordServlet server = new TestRecordServlet(8088);
        server.start();
        server.join();
    }
}
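
To summarize the protocol the two sides implement: the page sends the text message "start", then a binary WAV blob every 3 seconds while recording; the server appends each blob after the first to out.wav via AudioSystem and a SequenceInputStream, and the text messages "stop", "clear" and "analyze" only switch the currentCommand state (the cleanup and analysis steps are left commented out). For the end-to-end example to work, the Jetty port passed to TestRecordServlet must match the port in the page's ws://127.0.0.1:8088/... URL.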

 

 
