I'm using the following code to decode audio chunks coming from a nodejs socket:
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
var delayTime = 0;
var init = 0;
var audioStack = [];
var nextTime = 0;
client.on('stream', function(stream, meta) {
    stream.on('data', function(data) {
        context.decodeAudioData(data, function(buffer) {
            audioStack.push(buffer);
            if ((init != 0) || (audioStack.length > 10)) { // make sure we put at least 10 chunks in the buffer before starting
                init++;
                scheduleBuffers();
            }
        }, function(err) {
            console.log("err(decodeAudioData): " + err);
        });
    });
});

function scheduleBuffers() {
    while (audioStack.length) {
        var buffer = audioStack.shift();
        var source = context.createBufferSource();
        source.buffer = buffer;
        source.connect(context.destination);
        if (nextTime == 0)
            nextTime = context.currentTime + 0.05; // add 50ms latency to work well across systems - tune this if you like
        source.start(nextTime);
        nextTime += source.buffer.duration; // make the next buffer wait the length of the last buffer before being played
    }
}
But there are gaps/glitches between the audio chunks that I can't figure out.
I've also been told that the same thing can be done with MediaSource, letting the player handle the timing instead of doing it manually. Can someone provide an example of handling mp3 data that way?
Also, what is the proper way to handle live streaming with the Web Audio API? I've read almost every question on this topic, and none of them seem to work without glitches. Any ideas?
Best answer
You can use this code as an example: https://github.com/kmoskwiak/node-tcp-streaming-server
It basically uses Media Source Extensions. All you need to do is switch from video to audio:
buffer = mediaSource.addSourceBuffer('audio/mpeg');
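A minimal sketch of that approach, assuming the same client.on('stream', ...) socket from the question delivers MP3 chunks as ArrayBuffers, and that the page has an <audio id="player"> element (that id is made up here). Browser support for 'audio/mpeg' in addSourceBuffer varies, so check MediaSource.isTypeSupported('audio/mpeg') first if in doubt:

var audio = document.getElementById('player'); // hypothetical <audio> element
var mediaSource = new MediaSource();
audio.src = URL.createObjectURL(mediaSource);

mediaSource.addEventListener('sourceopen', function() {
    var sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg');
    var queue = [];

    // appendBuffer() is asynchronous, so queue chunks while the SourceBuffer is busy
    sourceBuffer.addEventListener('updateend', function() {
        if (queue.length) {
            sourceBuffer.appendBuffer(queue.shift());
        }
    });

    client.on('stream', function(stream, meta) {
        stream.on('data', function(data) {
            if (sourceBuffer.updating || queue.length) {
                queue.push(data);
            } else {
                sourceBuffer.appendBuffer(data);
            }
        });
    });

    audio.play();
});

The <audio> element's own decoder then takes care of the timing across chunk boundaries, which is what removes the manual scheduling (and the gaps) from the Web Audio version.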
For node.js - Web Audio API: Proper way to play data chunks from a nodejs server via socket, see the similar question on Stack Overflow: https://stackoverflow.com/questions/47600421/