web端语音识别

一、获取录音设备

/**
 * Enumerate available audio input (recording) devices.
 * Resolves with an array of MediaDeviceInfo for physical microphones,
 * rejects with a message string when enumeration is unsupported or fails.
 * @returns {Promise} jQuery promise resolving to MediaDeviceInfo[]
 */
var getAudioDevice = function(){
	var deferred = $.Deferred();
	if(navigator.mediaDevices && navigator.mediaDevices.enumerateDevices){
	    navigator.mediaDevices.enumerateDevices().then(function(ret){
	        var deviceArray = [];
	        for(var i = 0; i < ret.length; i++){
	            // skip the virtual "default"/"communications" aliases so each
	            // physical microphone appears only once
	            if(ret[i].kind === 'audioinput' && ret[i].deviceId !== 'communications' && ret[i].deviceId !== 'default'){
	                deviceArray.push(ret[i]);
	            }
	        }
	        deferred.resolve(deviceArray);
	    }).catch(function(){
	        // BUG FIX: the old try/catch around the .then() could never catch an
	        // asynchronous rejection, leaving the deferred pending forever.
	        deferred.reject('该浏览器无法获取录音设备!');
	    });
	}else{
	    deferred.reject('该浏览器无法获取录音设备!');
	}
	return deferred.promise();
}

二、根据录音设备获取音频数据

;(function(undefined){
    "use strict";
    var _global;
    /**
     * JscAsr - browser-side speech recognition client.
     * Captures microphone audio with the Web Audio API, posts raw PCM frames to
     * a Web Worker (workerPath) for downsampling/encoding, and streams the
     * encoded chunks over a WebSocket to the ASR service (asrUrl).
     * @param {Object} config - overrides for the defaults below.
     */
    function JscAsr(config){
        this._defaultConfig = {
            // specific input deviceId to capture from; null = browser default
            deviceId: null,
            // WebSocket URL of the speech recognition service
            asrUrl: "",
            workerPath: "voice_worker.js",
            downsampleRate: 8000,
            // how often (ms) to flush buffered audio to the ASR socket
            interval: 100,
            onMessage: function(){},
            /**
             * Log codes passed to onLog:
             * 1000: recording device initialized successfully
             * 1001: requesting microphone permission
             * 1002: receiving recording data
             * 1003: recording device initialization failed
             * 1004: this browser does not support microphone recording
             * 2000: ASR service connected
             * 2001: ASR connection closed
             * 2002: ASR connection failed
             * 3000: recognition started
             * 3001: recognition paused
             * 4001: recognizer destroyed
             */
            onLog: function(){}
        };
        // Shallow merge of any number of source objects into the first argument.
        this._extend = function(){
            var length = arguments.length;
            var target = arguments[0] || {};
            if (typeof target!="object" && typeof target != "function") {
                target = {};
            }
            if (length == 1) {
                target = arguments[0];
            }
            for (var i = 1; i < length; i++) { 
                var source = arguments[i]; 
                for (var key in source) { 
                    if (Object.prototype.hasOwnProperty.call(source, key)) { 
                        target[key] = source[key]; 
                    } 
                } 
            }
            return target;
        }
        // Minimal date formatter, e.g. _dateFormat("yyyy-MM-dd hh:mm:ss", new Date()).
        this._dateFormat = function(fmt,date){
            var o = { 
            "M+" : date.getMonth()+1,     // month
            "d+" : date.getDate(),     // day
            "h+" : date.getHours(),     // hour
            "m+" : date.getMinutes(),     // minute
            "s+" : date.getSeconds(),     // second
            "q+" : Math.floor((date.getMonth()+3)/3), // quarter
            "S" : date.getMilliseconds()    // millisecond
            }; 
            if(/(y+)/.test(fmt)) 
            fmt=fmt.replace(RegExp.$1, (date.getFullYear()+"").substr(4 - RegExp.$1.length)); 
            for(var k in o) 
            if(new RegExp("("+ k +")").test(fmt)) 
            fmt = fmt.replace(RegExp.$1, (RegExp.$1.length==1) ? (o[k]) : (("00"+ o[k]).substr((""+ o[k]).length))); 
            return fmt; 
        }
        this.config = this._extend(this._defaultConfig,config);
        this.params = {
            worker: null,
            ws: null,
            // 0: idle  1: recording  2: paused
            recordType: 0,
            intervalKey: null,
            isStart: false
        };
        /**
         * Build the Web Audio capture graph and report progress/data through
         * `callback` using the log codes documented above.
         */
        this.init = function(callback){
            var self = this;
            window.AudioContext = window.AudioContext || window.webkitAudioContext;
            // create the audio processing graph
            var audioCtx = new AudioContext();
            // NOTE(review): resume() is asynchronous; reading `state` synchronously
            // right after it may still observe "suspended". Kept as-is to preserve
            // the original permission-prompt flow.
            audioCtx.resume();
            if(audioCtx.state != "running"){
                callback({
                    code: 1001,
                    msg: "请求麦克风使用授权!"
                });
            }else{
                // Legacy callback-style getUserMedia with vendor prefixes.
                // BUG FIX: the last fallback used to be `MediaDevices.getUserMedia`
                // (an unbound prototype method, never callable); wrap the modern
                // promise-based navigator.mediaDevices API instead.
                navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia ||
                    (navigator.mediaDevices && navigator.mediaDevices.getUserMedia && function(constraints, onSuccess, onError){
                        navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
                    });
                if(navigator.getUserMedia){
                    var constraints = {};
                    constraints.audio = self.config.deviceId ? { optional: [{sourceId: self.config.deviceId}] }  : true;
                    navigator.getUserMedia(constraints,function(stream){
                        self.config.streamId = stream.id;
                        // gain node used as the mixing point of the capture graph
                        var gainNode = audioCtx.createGain();
                        // audio source backed by the microphone MediaStream
                        var source = audioCtx.createMediaStreamSource(stream);
                        // Duplicate channel 0 into both output channels -> mono signal.
                        function convertToMono(input) {
                            var splitter = audioCtx.createChannelSplitter(2);
                            var merger = audioCtx.createChannelMerger(2);
                            input.connect(splitter);
                            // feed splitter output 0 into both merger inputs
                            splitter.connect(merger, 0, 0);
                            splitter.connect(merger, 0, 1);
                            return merger;
                        }
                        var merger = convertToMono(source);
                        // When the hardware sample rate exceeds 8 kHz, low-pass the
                        // signal before downsampling to avoid aliasing.
                        if(8000 < audioCtx.sampleRate){
                            var biquadFilter = audioCtx.createBiquadFilter();
                            // `biquadFilter.LOWPASS || "lowpass"` always evaluated to
                            // the string form; use the standard value directly.
                            biquadFilter.type = "lowpass";
                            // cut off at the Nyquist frequency of the 8 kHz target rate
                            biquadFilter.frequency.value = 8000/2;
                            merger.connect(biquadFilter);
                            // BUG FIX: the filter was previously connected to gainNode
                            // twice; duplicate connections sum and double the amplitude.
                            biquadFilter.connect(gainNode);
                        }else{
                            merger.connect(gainNode);
                        }
                        // buffer size in sample frames (must be one of 256..16384,
                        // powers of two), with 2 input and 2 output channels
                        var bufferLen = 4096,inputChannelLen = 2, outputChannelLen = 2,scriptProcessor = null;
                        if(gainNode.context.createScriptProcessor){
                            scriptProcessor = gainNode.context.createScriptProcessor(bufferLen,inputChannelLen,outputChannelLen)
                        }else{
                            // deprecated pre-standard name used by very old WebKit builds
                            scriptProcessor = gainNode.context.createJavaScriptNode(bufferLen,inputChannelLen,outputChannelLen);
                        }
                        // Each audio-process tick delivers one buffer of captured PCM.
                        scriptProcessor.onaudioprocess = function(e){
                            callback({
                                code: 1002,
                                msg: "正在获取录音数据!",
                                data: e
                            });
                        }
                        gainNode.connect(scriptProcessor);
                        // a ScriptProcessorNode must be connected to a destination for
                        // onaudioprocess to fire in some browsers
                        scriptProcessor.connect(gainNode.context.destination);
                        // Route the signal to the speakers through a zero-gain node so
                        // nothing is audibly played back.
                        var zeroGain = audioCtx.createGain();
                        zeroGain.gain.value = 0.0;
                        gainNode.connect(zeroGain);
                        zeroGain.connect(audioCtx.destination);
                        callback({
                            code: 1000,
                            msg: "录音设备初始化成功!",
                            data: {
                                sampleRate: gainNode.context.sampleRate
                            }
                        });
                    },function(err){
                        callback({
                            code: 1003,
                            msg: "录音设备初始化失败!",
                            data: err
                        });
                    });
                }else{
                    callback({
                        code: 1004,
                        msg: "此浏览器中不支持麦克风录音!"
                    });
                }
            }
        };
        /**
         * Start (or resume) recognition: spin up the worker and the WebSocket,
         * then initialize audio capture.
         */
        this.start = function(){
            var self = this;
            if(self.params.recordType == 1){
                return;
            }
            // resuming from pause only needs the state flag flipped
            if(self.params.recordType == 2){
                self.params.recordType = 1;
                return;
            }
            self.params.worker = new Worker(self.config.workerPath);
            self.params.worker.onmessage = function(e){
                var ret = e.data;
                // forward encoded audio chunks only while the socket is OPEN
                if(self.params.ws.readyState == 1 && ret.action == "get16KMonoBlob"){
                    self.params.ws.send(ret.data);
                }
            };
            self.params.ws = new WebSocket(self.config.asrUrl);
            self.params.ws.onopen = function(){
                // periodically ask the worker to flush its buffered audio
                self.params.intervalKey = setInterval(function(){
                    if(self.params.recordType == 1){
                        self.params.worker.postMessage({
                            action: "get16KMonoBlob",
                            params: {
                                type: 'audio/wav'
                            }
                        });
                    }
                },self.config.interval);
                self.config.onLog({
                    code: 2000,
                    msg: "语音识别服务连接成功!"
                });
            }
            self.params.ws.onmessage = function(e){
                var ret;
                // BUG FIX: a malformed frame used to throw out of the handler;
                // skip anything that is not valid JSON.
                try{
                    ret = JSON.parse(e.data);
                }catch(parseErr){
                    return;
                }
                if(ret.status == 0 && ret.result && ret.result.hypotheses && ret.result.hypotheses[0] && ret.result.hypotheses[0].transcript){
                    self.config.onMessage({
                        id: self.config.deviceId,
                        final: ret.result.final,
                        msg: ret.result.hypotheses[0].transcript,
                        time: self._dateFormat("yyyy-MM-dd hh:mm:ss",new Date())
                    });
                }
            };
            self.params.ws.onclose = function(){
                self.config.onLog({
                    code: 2001,
                    msg: "语音识别连接关闭!"
                });
            };
            self.params.ws.onerror = function(){
                self.config.onLog({
                    code: 2002,
                    msg: "语音识别连接失败!"
                });
            };
            self.init(function(ret){
                if(ret.code == 1002){
                    if(self.params.recordType == 1){
                        // ship both channels of the captured buffer to the worker
                        self.params.worker.postMessage({
                            action: "record",
                            params: {
                                inputBufferLeft: ret.data.inputBuffer.getChannelData(0),
                                inputBufferRight: ret.data.inputBuffer.getChannelData(1)
                            }
                        });
                        if(!self.params.isStart){
                            self.config.onLog({
                                code: 3000,
                                msg: "开始语音识别!"
                            });
                            self.params.isStart = true;
                        }
                    }
                }else if(ret.code == 1000){
                    // device ready: configure the worker's resampler, start recording
                    self.params.worker.postMessage({
                        action: "config",
                        params: {
                            sampleRate: ret.data.sampleRate,
                            downsampleRate: self.config.downsampleRate || ret.data.sampleRate
                        }
                    });
                    self.params.recordType = 1;
                }else{
                    // initialization failed: tear down worker and socket
                    self.params.worker.postMessage({
                        action: "close"
                    });
                    self.params.ws.close();
                    self.params.recordType = 0;
                    self.params.isStart = false;
                }
                self.config.onLog({
                    code: ret.code,
                    msg: ret.msg
                });
            });
        };
        /** Pause recognition; audio keeps flowing but is no longer recorded/sent. */
        this.stop = function(){
            this.params.recordType = 2;
            this.params.isStart = false;
            this.config.onLog({
                code: 3001,
                msg: "暂停语音识别!"
            });
        };
        /** Tear down the flush timer, the WebSocket and the worker. */
        this.destroy = function(){
            // BUG FIX: stop the flush interval so it no longer fires after destroy.
            if(this.params.intervalKey){
                clearInterval(this.params.intervalKey);
                this.params.intervalKey = null;
            }
            // BUG FIX: guard against destroy() being called before start(),
            // when ws/worker are still null.
            if(this.params.ws){
                this.params.ws.close();
            }
            if(this.params.worker){
                this.params.worker.terminate();
            }
            this.config.onLog({
                code: 4001,
                msg: "销毁语音识别对象!"
            });
        };
    }
    _global = (function(){return this || (0,eval)("this");}());
    // UMD-style export: CommonJS, then AMD, then a global fallback.
    if(typeof module !== "undefined" && module.exports){
        module.exports = JscAsr;
    }else if(typeof define === "function" && define.amd){
        define(function(){
            return JscAsr;
        });
    }else{
        !("JscAsr" in _global) && (_global.JscAsr = JscAsr);
    }
}());

三、处理音频数据

// Reference to the Web Worker global scope (used for postMessage/close elsewhere).
var global = this;

// Message entry point: route each incoming message to the vioceWorker handler
// named by its `action` field; ignore anything that does not map to a function.
global.onmessage = function(e){
    var msg = e.data || {};
    if (!msg.action) {
        return;
    }
    var handler = vioceWorker[msg.action];
    if (Object.prototype.toString.call(handler) === '[object Function]') {
        handler.call(vioceWorker, msg);
    }
};

// Worker-side audio pipeline: buffers raw PCM frames from the main thread,
// then downsamples and encodes them to 16-bit little-endian PCM on demand.
var vioceWorker = {
    sampleRate: 0,          // capture rate reported by the audio context
    downsampleRate: 0,      // target rate for the ASR service
    recordBuffersLeft: [],  // queued Float32Array frames, left channel
    recordBuffersRight: [], // queued Float32Array frames, right channel
    recordLength: 0,        // total queued samples per channel
    // Store the capture and target sample rates sent by the main thread.
    config: function(data){
        this.sampleRate = data.params.sampleRate;
        this.downsampleRate = data.params.downsampleRate || data.params.sampleRate;
    },
    // Queue one captured buffer pair.
    record: function(data){
        var left = data.params.inputBufferLeft;
        var right = data.params.inputBufferRight;
        this.recordBuffersLeft.push(left);
        this.recordBuffersRight.push(right);
        this.recordLength += left.length;
    },
    // Flush everything queued so far as a Blob of downsampled 16-bit PCM
    // (left channel only — the upstream graph is already mono), then reset.
    get16KMonoBlob: function(data){
        var merged = this.mergeBuffers(this.recordBuffersLeft, this.recordLength);
        var resampled = this.downsample(merged, this.sampleRate, this.downsampleRate);
        var view = this.encodeRAW(resampled, true);
        var audioBlob = new Blob([view], {type: data.params.type});
        this.clear();
        global.postMessage({
            action: data.action,
            data: audioBlob
        });
    },
    // Concatenate the queued Float32Arrays into one array of recLength samples.
    mergeBuffers: function(recBuffers, recLength){
        var merged = new Float32Array(recLength);
        var pos = 0;
        recBuffers.forEach(function(buf){
            merged.set(buf, pos);
            pos += buf.length;
        });
        return merged;
    },
    // Encode float samples as raw 16-bit little-endian PCM in a DataView.
    encodeRAW: function(samples){
        var view = new DataView(new ArrayBuffer(samples.length * 2));
        this.floatTo16BitPCM(view, 0, samples);
        return view;
    },
    // Nearest-neighbour decimation from sampleRate down to outputSampleRate.
    // Returns the input untouched when no downsampling is required.
    downsample: function(e, sampleRate, outputSampleRate){
        if (sampleRate <= outputSampleRate) {
            return e;
        }
        var ratio = sampleRate / outputSampleRate;
        var outLength = Math.ceil(e.length * outputSampleRate / sampleRate);
        var out = new Float32Array(outLength);
        var cursor = 0;
        for (var i = 0; i < outLength; i++) {
            out[i] = e[Math.floor(cursor)];
            cursor += ratio;
        }
        return out;
    },
    // Clamp each float sample to [-1, 1] and write it as signed 16-bit LE.
    floatTo16BitPCM: function(output, offset, input){
        for (var i = 0; i < input.length; i++, offset += 2) {
            var clamped = Math.max(-1, Math.min(1, input[i]));
            output.setInt16(offset, clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF, true);
        }
    },
    // Drop all queued audio.
    clear: function(){
        this.recordBuffersLeft = [];
        this.recordBuffersRight = [];
        this.recordLength = 0;
    },
    // Discard buffers and terminate this worker.
    close: function(){
        this.clear();
        global.close();
    }
}

四、推送音频数据给语音识别服务获取识别结果

五、处理业务

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值