How do I extract the preceding audio (from the microphone) as a buffer when silence is detected? (JS)

I am using the Google Cloud API for Speech-to-Text, with a NodeJS back end. The app needs to be able to listen for voice commands and transmit them to the back end as a buffer. To do this, I need to send a buffer of the preceding audio whenever silence is detected. Any help would be appreciated, including with the js code below.
if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({audio: true}, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

var recording = false;

window.startRecording = function () {
    recording = true;
};

window.stopRecording = function () {
    recording = false;
    // window.Stream.end();
};

function success(e) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();

    // the sample rate is in context.sampleRate
    audioInput = context.createMediaStreamSource(e);

    var bufferSize = 4096;
    recorder = context.createScriptProcessor(bufferSize, 1, 1);

    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        console.log('recording');
        var left = e.inputBuffer.getChannelData(0);
        console.log(convertoFloat32ToInt16(left));
    };

    audioInput.connect(recorder);
    recorder.connect(context.destination);
}
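
The snippet above calls convertoFloat32ToInt16, which is not defined in the question. A typical implementation of that Float32-to-Int16 conversion (an assumption, not the asker's actual code) looks something like this:

function convertoFloat32ToInt16(buffer) {
    // Float32 samples from the ScriptProcessor are in [-1, 1];
    // scale them to the signed 16-bit range expected by LINEAR16 PCM.
    var result = new Int16Array(buffer.length);
    for (var i = 0; i < buffer.length; i++) {
        var s = Math.max(-1, Math.min(1, buffer[i]));
        result[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return result;
}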

I'm not clear on what you need help with: sending the data to this API? Detecting silence? Or splitting up the data you record? - Kaiido
Sending the data to this API and getting the output in real time. - azhar
@azhar Does the API support real-time communication? - guest271314
Ah, so the "silence" part isn't really part of this question then... The docs don't seem to cover direct API calls from JS, and that probably wouldn't be a good idea anyway, since you'd need to expose your token. Oh, but you mean from Node.js? Then just follow the docs; why are you using this front-end code? - Kaiido
@Kaiido Actually I want to detect speech from the browser microphone, so I have to send the buffer from the front end. - azhar
3 Answers


I'm not too sure what exactly is being asked here, so this answer is only meant to provide a way to detect silence in an AudioStream.

To detect silence in an AudioStream, you can use an AnalyserNode, on which you call the getByteFrequencyData method at regular intervals, checking whether there were any sounds louder than your expected level during the given time span. You can set the threshold level directly with the minDecibels property of the AnalyserNode.

function detectSilence(
  stream,
  onSoundEnd = _=>{},
  onSoundStart = _=>{},
  silence_delay = 500,
  min_decibels = -80
  ) {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  const streamNode = ctx.createMediaStreamSource(stream);
  streamNode.connect(analyser);
  analyser.minDecibels = min_decibels;

  const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
  let silence_start = performance.now();
  let triggered = false; // trigger only once per silence event

  function loop(time) {
    requestAnimationFrame(loop); // we'll loop every 60th of a second to check
    analyser.getByteFrequencyData(data); // get current data
    if (data.some(v => v)) { // if there is data above the given db limit
      if (triggered) {
        triggered = false;
        onSoundStart();
      }
      silence_start = time; // set it to now
    }
    if (!triggered && time - silence_start > silence_delay) {
      onSoundEnd();
      triggered = true;
    }
  }
  loop();
}

function onSilence() {
  console.log('silence');
}
function onSpeak() {
  console.log('speaking');
}

navigator.mediaDevices.getUserMedia({
    audio: true
  })
  .then(stream => {
    detectSilence(stream, onSilence, onSpeak);
    // do something else with the stream
  })
  .catch(console.error);

And, since Stack Snippets may block gUM, here it is as a fiddle.
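
Coming back to the original question (extracting the preceding audio as a buffer once silence is detected), one way — a sketch, not part of the original answer — is to pair this detectSilence() with a MediaRecorder: record continuously, and when silence is reported, stop the recorder to flush what has been captured so far as a Blob, convert it to an ArrayBuffer, and start recording again for the next utterance. handleUtterance is a placeholder for whatever sends the buffer to the back end.

navigator.mediaDevices.getUserMedia({ audio: true })
  .then(stream => {
    const recorder = new MediaRecorder(stream);
    let chunks = [];
    recorder.ondataavailable = e => chunks.push(e.data);
    recorder.onstop = async () => {
      // everything recorded since the last silence
      const blob = new Blob(chunks, { type: recorder.mimeType });
      chunks = [];
      const buffer = await new Response(blob).arrayBuffer();
      handleUtterance(buffer); // e.g. POST it to the Node back end
      recorder.start();        // keep listening for the next utterance
    };
    recorder.start();
    detectSilence(
      stream,
      () => { if (recorder.state === "recording") recorder.stop(); }, // onSoundEnd
      () => {}                                                        // onSoundStart
    );
  })
  .catch(console.error);

function handleUtterance(arrayBuffer) {
  console.log("utterance buffer:", arrayBuffer.byteLength, "bytes");
}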


You can use the SpeechRecognition result event to determine when a word or phrase has been recognized, for example ls, cd, pwd or other commands; pass the SpeechRecognitionAlternative.transcript to speechSynthesis.speak(); at the attached start and end events of the SpeechSynthesisUtterance, call .start() or .resume() on a MediaRecorder object to which the MediaStream has been passed; then convert the Blob in the dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().
We could alternatively use the audiostart or soundstart and audioend or soundend events of SpeechRecognition to record the user's actual speech, though the end events may not fire consistently in relation to the actual start and end of the audio captured by a standard system microphone alone.
<!DOCTYPE html>
<html>

<head>
  <title>Speech Recognition Recording</title>
</head>

<body>
  <input type="button" value="Stop speech command recognition" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);
        const recognition = new webkitSpeechRecognition();
        const synthesis = new SpeechSynthesisUtterance();
        const handleResult = e => {
          recognition.onresult = null;
          console.log(e.results);
          const result = e.results[e.results.length - 1];

          if (result.isFinal) {
            const [{transcript}] = result;
            console.log(transcript);
            synthesis.text = transcript;
            window.speechSynthesis.speak(synthesis);
          }
        }
        synthesis.onstart = () => {
          if (recorder.state === "inactive") {
            recorder.start()
          } else {
            if (recorder.state === "paused") {
              recorder.resume();
            }
          }
        }
        synthesis.onend = () => {
          recorder.pause();
          recorder.requestData();
        }
        recorder.ondataavailable = async(e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              console.log(blobURL, ab);
              recognition.onresult = handleResult;
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
        }
        recognition.continuous = true;
        recognition.interimResults = false;
        recognition.maxAlternatives = 1;
        recognition.start();
        recognition.onend = e => {
          console.log("recognition ended, stream.active", stream.active);

          if (stream.active) {
            console.log(e);
            // the service disconnects after a period of time
            recognition.start();
          }
        }
        recognition.onresult = handleResult;

        stream.oninactive = () => {
          console.log("stream ended");
        }

        document.getElementById("stop")
          .onclick = () => {
            console.log("stream.active:", stream.active);
            if (stream && stream.active && recognition) {
              recognition.abort();
              recorder.stop();
              for (let track of stream.getTracks()) {
                track.stop();
              }
              console.log("stream.active:", stream.active);
            }
          }

      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>

</html>

plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview


"webkitSpeechRecognition" 这个只能在 Chrome 浏览器中使用。我希望它也可以在非 Chrome 浏览器中使用。 - azhar
我正在尝试在后端使用Google Cloud API并解析语音为文本。但是如何将前端的语音实时流式传输到后端是我的问题。 - azhar
@azhar:“我也想在非Chrome浏览器中实现这个。” 一种方法是使用两个<button>元素来启动或停止记录MediaStream。 “但如何将前端的声音实时流传到后端?”您尝试过使用WebRTC吗? OP描述的要求是“将它们作为缓冲区传输到后端”,而不是直播MediaStream - guest271314
服务期望被“POST”的数据类型是什么? - guest271314
socket.io 有任何可能性吗? - azhar
socket.io 与需求有什么关系?最简单的解决方案是使用按钮让用户在需要录制和暂停音频时点击,然后将录制的音频POST到服务器。*oogle Cloud API语音转文本需要一个ArrayBuffer作为输入数据吗? - guest271314

The simplest approach is to use the .pause(), .resume(), and .stop() methods of MediaRecorder() to allow the user to start, pause, and stop recording the audio captured with navigator.mediaDevices.getUserMedia(), and to convert the resulting Blob to an ArrayBuffer, if that is what the API expects to be POSTed to the server.
<!DOCTYPE html>
<html>

<head>
  <title>User Media Recording</title>
</head>

<body>
  <input type="button" value="Start/resume recording audio" id="start">
  <input type="button" value="Pause recording audio" id="pause">
  <input type="button" value="Stop recording audio" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);

        recorder.ondataavailable = async(e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              // do stuff with `ArrayBuffer` of recorded audio
              console.log(blobURL, ab);
              // we do not need the `Blob URL`, we can revoke the object
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
          recorder.requestData();
        }

        stream.oninactive = () => {
          console.log("stream ended");
        }

        document.getElementById("start")
          .onclick = () => {

            if (recorder.state === "inactive") {
              recorder.start();
            } else {
              recorder.resume();
            }
            console.log("recorder.state:", recorder.state);
          }

        document.getElementById("pause")
          .onclick = () => {

            if (recorder.state === "recording") {
              recorder.pause();
            }
            console.log("recorder.state:", recorder.state);
          }

        document.getElementById("stop")
          .onclick = () => {

            if (recorder.state === "recording" || recorder.state === "paused") {
              recorder.stop();
            }

            for (let track of stream.getTracks()) {
              track.stop();
            }

            document.getElementById("start").onclick = null;
            document.getElementById("pause").onclick = null;
            console.log("recorder.state:", recorder.state
            , "stream.active", stream.active);
          }

      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>

</html>

plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview
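
For the original goal (a NodeJS back end doing the Google Cloud Speech-to-Text call), the recorded audio still has to reach the server. Below is a minimal sketch, assuming an Express route named /transcribe (made up for illustration), the official @google-cloud/speech client authenticated via GOOGLE_APPLICATION_CREDENTIALS, and that the API accepts the MediaRecorder's webm/opus output; if it does not, the audio would need to be transcoded first. On the browser side, the Blob from the dataavailable handler above could simply be POSTed with fetch("/transcribe", { method: "POST", body: e.data }).

const express = require("express");
const speech = require("@google-cloud/speech");

const app = express();
const client = new speech.SpeechClient();

// collect the raw request body as a Buffer
app.use(express.raw({ type: "*/*", limit: "10mb" }));

app.post("/transcribe", async (req, res) => {
  try {
    const [response] = await client.recognize({
      config: {
        encoding: "WEBM_OPUS",   // assumption: matches the recorder's output
        sampleRateHertz: 48000,
        languageCode: "en-US"
      },
      audio: { content: req.body.toString("base64") }
    });
    const transcript = response.results
      .map(r => r.alternatives[0].transcript)
      .join("\n");
    res.json({ transcript });
  } catch (err) {
    console.error(err);
    res.status(500).json({ error: err.message });
  }
});

app.listen(3000, () => console.log("listening on port 3000"));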

