javascript - Reading an audio file into a Web Audio API analyser (Node.js server, JS front end)

Tags: javascript node.js web-audio-api

I set up a simple Node.js server to serve .wav files to a local front end.

require('dotenv').config();
const debugBoot = require('debug')('boot');
const cors = require('cors');
const express = require('express');
const app = express();
app.set('port', process.env.PORT || 3000);

app.use(cors());
app.use(express.static('public'));


const server = app.listen(app.get('port'), () => {
    const port = server.address().port;
    debugBoot('Server running at http://localhost:' + port);
});
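
Since the server uses express.static('public'), the fetch below assumes the file lives at public/audio/8bars60bpmOnlyKick.wav inside the project.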

On my local front end I receive the file:

fetch('http://localhost:3000/audio/8bars60bpmOnlyKick.wav').then(response => process(response.body))

function process(stream) {
    console.log(stream);
    const context = new AudioContext();
    const analyser = context.createAnalyser();
    const source = context.createMediaStreamSource(stream); // throws: stream is a ReadableStream, not a MediaStream
    source.connect(analyser);
    const data = new Uint8Array(analyser.frequencyBinCount);
}

I want to pipe the stream into AudioContext().createMediaStreamSource(). I can do that with a MediaStream, e.g. one coming from a microphone.
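
For comparison, this is the working MediaStream case mentioned above, as a minimal sketch (getUserMedia prompts for microphone permission):

navigator.mediaDevices.getUserMedia({ audio: true }).then(micStream => {
    // createMediaStreamSource accepts a real MediaStream, e.g. from the microphone
    const context = new AudioContext();
    const source = context.createMediaStreamSource(micStream);
    const analyser = context.createAnalyser();
    source.connect(analyser);
});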

But with a ReadableStream I get the error: Failed to execute 'createMediaStreamSource' on 'AudioContext': parameter 1 is not of type 'MediaStream'.

I want to serve/receive the audio in a way that lets me plug it into the Web Audio API and use the analyser. It does not need to be a stream if there is another solution.

Best answer

I basically merged these two examples together:

https://www.youtube.com/watch?v=hYNJGPnmwls (https://codepen.io/jakealbaugh/pen/jvQweW/)

and this example from the MDN Web Audio examples:

https://github.com/mdn/webaudio-examples/blob/master/audio-analyser/index.html

let audioContext;   // assigned once the DOM has loaded
let audioBuffer;
let sourceNode;
let analyserNode;
let javascriptNode;
let audioData = null;
let audioPlaying = false;
let sampleSize = 1024;  // number of samples to collect before analyzing data
let frequencyDataArray;     // array to hold the frequency data
// Global Variables for the Graphics
let canvasWidth = 512;
let canvasHeight = 256;
let ctx;

document.addEventListener("DOMContentLoaded", function () {
    ctx = document.body.querySelector('canvas').getContext("2d");
    // the AudioContext is the primary 'container' for all your audio node objects
    try {
        audioContext = new AudioContext();
    } catch (e) {
        alert('Web Audio API is not supported in this browser');
    }
    // When the Start button is clicked, finish setting up the audio nodes, play the sound,
    // gather samples for the analysis, update the canvas
    document.body.querySelector('#start_button').addEventListener('click', function (e) {
        e.preventDefault();
        // Set up the audio Analyser, the Source Buffer and javascriptNode
        initCanvas();
        setupAudioNodes();
        javascriptNode.onaudioprocess = function () {
            // get the frequency data for this sample
            analyserNode.getByteFrequencyData(frequencyDataArray);
            // draw the display if the audio is playing
            console.log(frequencyDataArray)
            draw();
        };
        loadSound();
    });

    document.body.querySelector("#stop_button").addEventListener('click', function(e) {
        e.preventDefault();
        sourceNode.stop(0);
        audioPlaying = false;
    });

    function loadSound() {
        fetch('http://localhost:3000/audio/8bars60bpmOnlyKick.wav').then(response => {
            response.arrayBuffer().then(function (buffer) {
                audioContext.decodeAudioData(buffer).then((audioBuffer) => {
                    console.log('audioBuffer', audioBuffer);
                    // {length: 1536000, duration: 32, sampleRate: 48000, numberOfChannels: 2}
                    audioData = audioBuffer;
                    playSound(audioBuffer);
                });
            });
        })
    }

    function setupAudioNodes() {
        sourceNode = audioContext.createBufferSource();
        analyserNode = audioContext.createAnalyser();
        analyserNode.fftSize = 4096;
        javascriptNode = audioContext.createScriptProcessor(sampleSize, 1, 1);
        // Create the array for the data values
        frequencyDataArray = new Uint8Array(analyserNode.frequencyBinCount);
        // Now connect the nodes together
        sourceNode.connect(audioContext.destination);
        sourceNode.connect(analyserNode);
        analyserNode.connect(javascriptNode);
        javascriptNode.connect(audioContext.destination);
    }

    function initCanvas() {
        ctx.fillStyle = 'hsl(280, 100%, 10%)';
        ctx.fillRect(0, 0, canvasWidth, canvasHeight);
    }

    // Play the audio once
    function playSound(buffer) {
        sourceNode.buffer = buffer;
        sourceNode.start(0);    // Play the sound now
        sourceNode.loop = false;
        audioPlaying = true;
    }

    function draw() {
        const data = frequencyDataArray;
        const dataLength = frequencyDataArray.length;
        console.log("data", data);

        const h = canvasHeight / dataLength;
        // draw on the right edge
        const x = canvasWidth - 1;

        // copy the old image and move one left
        let imgData = ctx.getImageData(1, 0, canvasWidth - 1, canvasHeight);
        ctx.fillRect(0, 0, canvasWidth, canvasHeight);
        ctx.putImageData(imgData, 0, 0);

        for (let i = 0; i < dataLength; i++) {
            // console.log(data)
            let rat = data[i] / 255;
            let hue = Math.round(((rat * 120) + 280) % 360);
            let sat = '100%';
            let lit = 10 + (70 * rat) + '%';
            // console.log("rat %s, hue %s, lit %s", rat, hue, lit);
            ctx.beginPath();
            ctx.strokeStyle = `hsl(${hue}, ${sat}, ${lit})`;
            ctx.moveTo(x, canvasHeight - (i * h));
            ctx.lineTo(x, canvasHeight - (i * h + h));
            ctx.stroke();
        }
    }
});

A brief explanation of what each part does:

Creating the AudioContext

When the DOM has loaded, the AudioContext is created.

Loading the audio file and converting it to an AudioBuffer

The sound is then loaded from the backend server (code shown above). The response is converted to an ArrayBuffer, which is then decoded into an AudioBuffer. This is essentially the core answer to the question above.
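
For reference, the same load-and-decode steps can be written more compactly with async/await; this is just a condensed sketch of the code above, assuming the same audioContext:

async function loadSound(url) {
    const response = await fetch(url);                 // fetch the .wav from the server
    const arrayBuffer = await response.arrayBuffer();  // read the raw bytes
    return audioContext.decodeAudioData(arrayBuffer);  // resolves to an AudioBuffer
}
// loadSound('http://localhost:3000/audio/8bars60bpmOnlyKick.wav').then(playSound);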

Processing the AudioBuffer

To give more context on how the loaded audio file is used, I have included the rest of the file.

To process the AudioBuffer further, a buffer source node is created and the buffer is assigned to it: sourceNode.buffer = buffer

The javascriptNode acts, in my opinion, like a stream that gives you access to the analyser's output.
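
Note that ScriptProcessorNode is deprecated in current browsers. One alternative (not part of the original answer) is to poll the analyser from requestAnimationFrame instead, reusing analyserNode, frequencyDataArray and draw() from the code above:

function pollAnalyser() {
    analyserNode.getByteFrequencyData(frequencyDataArray);
    draw();
    if (audioPlaying) requestAnimationFrame(pollAnalyser); // keep polling while audio plays
}
requestAnimationFrame(pollAnalyser);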

The original question and answer can be found on Stack Overflow: https://stackoverflow.com/questions/56743980/
