javascript - 值存储在不同的数组中而不是单个数组中

标签 javascript arrays web-audio-api

在 updatepitch() 函数中,我尝试将音调值存储在单个数组中,但我的代码所做的是将所有值存储在不同的数组中。

有没有办法将所有值存储在一个数组中。

window.AudioContext = window.AudioContext || window.webkitAudioContext;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;


var audioContext = null;
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;

var mediaStreamSource = null;
var detectorElem,
  canvasElem,
  waveCanvas,
  pitchElem,
  noteElem,
  detuneElem,
  detuneAmount;

window.onload = function() {
  audioContext = new AudioContext();
  MAX_SIZE = Math.max(4, Math.floor(audioContext.sampleRate / 5000)); // corresponds to a 5kHz signal


  detectorElem = document.getElementById("detector");
  canvasElem = document.getElementById("output");


  pitchElem = document.getElementById("pitch");

  noteElem = document.getElementById("note");
  detuneElem = document.getElementById("detune");
  detuneAmount = document.getElementById("detune_amt");





}


function error() {
  alert('Stream generation failed.');
}

function getUserMedia(dictionary, callback) {
  try {
    navigator.getUserMedia =
      navigator.getUserMedia ||
      navigator.webkitGetUserMedia ||
      navigator.mozGetUserMedia;
    navigator.getUserMedia(dictionary, callback, error);
  } catch (e) {
    alert('getUserMedia threw exception :' + e);
  }
}

function gotStream(stream) {
  // Create an AudioNode from the stream.
  mediaStreamSource = audioContext.createMediaStreamSource(stream);

  // Connect it to the destination.
  analyser = audioContext.createAnalyser();
  analyser.fftSize = 2048;
  mediaStreamSource.connect(analyser);
  updatePitch();
}



function toggleLiveInput() {
  if (isPlaying) {
    //stop playing and return
    sourceNode.stop(0);
    sourceNode = null;
    analyser = null;
    isPlaying = false;
    if (!window.cancelAnimationFrame)
      window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
    window.cancelAnimationFrame(rafID);
  }
  getUserMedia({
    "audio": {
      "mandatory": {
        "googEchoCancellation": "false",
        "googAutoGainControl": "false",
        "googNoiseSuppression": "false",
        "googHighpassFilter": "false"
      },
      "optional": []
    },
  }, gotStream);

}


var rafID = null;
var tracks = null;
var buflen = 1024;
var buf = new Float32Array(buflen);

var noteStrings = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];

function noteFromPitch(frequency) {
  var noteNum = 12 * (Math.log(frequency / 440) / Math.log(2));
  return Math.round(noteNum) + 69;
}

function frequencyFromNoteNumber(note) {
  return 440 * Math.pow(2, (note - 69) / 12);
}

function centsOffFromPitch(frequency, note) {
  return Math.floor(1200 * Math.log(frequency / frequencyFromNoteNumber(note)) / Math.log(2));
}

var MIN_SAMPLES = 0; // will be initialized when AudioContext is created.
var GOOD_ENOUGH_CORRELATION = 0.9; // this is the "bar" for how close a correlation needs to be

function autoCorrelate(buf, sampleRate) {
  var SIZE = buf.length;
  var MAX_SAMPLES = Math.floor(SIZE / 2);
  var best_offset = -1;
  var best_correlation = 0;
  var rms = 0;
  var foundGoodCorrelation = false;
  var correlations = new Array(MAX_SAMPLES);

  for (var i = 0; i < SIZE; i++) {
    var val = buf[i];
    rms += val * val;
  }
  rms = Math.sqrt(rms / SIZE);
  if (rms < 0.01) // not enough signal
    return -1;

  var lastCorrelation = 1;
  for (var offset = MIN_SAMPLES; offset < MAX_SAMPLES; offset++) {
    var correlation = 0;

    for (var i = 0; i < MAX_SAMPLES; i++) {
      correlation += Math.abs((buf[i]) - (buf[i + offset]));
    }
    correlation = 1 - (correlation / MAX_SAMPLES);
    correlations[offset] = correlation; // store it, for the tweaking we need to do below.
    if ((correlation > GOOD_ENOUGH_CORRELATION) && (correlation > lastCorrelation)) {
      foundGoodCorrelation = true;
      if (correlation > best_correlation) {
        best_correlation = correlation;
        best_offset = offset;
      }
    } else if (foundGoodCorrelation) {

      var shift = (correlations[best_offset + 1] - correlations[best_offset - 1]) / correlations[best_offset];
      return sampleRate / (best_offset + (8 * shift));
    }
    lastCorrelation = correlation;
  }
  if (best_correlation > 0.01) {
    // console.log("f = " + sampleRate/best_offset + "Hz (rms: " + rms + " confidence: " + best_correlation + ")")
    return sampleRate / best_offset;
  }
  return -1;
  //	var best_frequency = sampleRate/best_offset;
}

function updatePitch(time) {
  var cycles = new Array;
  analyser.getFloatTimeDomainData(buf);
  var ac = autoCorrelate(buf, audioContext.sampleRate);
  // TODO: Paint confidence meter on canvasElem here.



  if (ac == -1) {
    detectorElem.className = "vague";
    pitchElem.innerText = "--";
    noteElem.innerText = "-";
    detuneElem.className = "";
    detuneAmount.innerText = "--";
  } else {
    detectorElem.className = "confident";
    pitch = ac;
    var arraypitch = [];
    arraypitch.push(pitch);
    console.log(arraypitch);
    document.getElementById("pit").innerHTML = Math.max(arraypitch);
    pitchElem.innerText = Math.round(pitch);
    var note = noteFromPitch(pitch);
    noteElem.innerHTML = noteStrings[note % 12];
    var detune = centsOffFromPitch(pitch, note);

  }

  if (!window.requestAnimationFrame)
    window.requestAnimationFrame = window.webkitRequestAnimationFrame;
  rafID = window.requestAnimationFrame(updatePitch);
}
body {
  font: 14pt 'Alike', sans-serif;
}

#note {
  font-size: 164px;
}

.droptarget {
  background-color: #348781
}

div.confident {
  color: black;
}

div.vague {
  color: lightgrey;
}

#note {
  display: inline-block;
  height: 180px;
  text-align: left;
}

#detector {
  width: 300px;
  height: 300px;
  border: 4px solid gray;
  border-radius: 8px;
  text-align: center;
  padding-top: 10px;
}

#output {
  width: 300px;
  height: 42px;
}

#flat {
  display: none;
}

#sharp {
  display: none;
}

.flat #flat {
  display: inline;
}

.sharp #sharp {
  display: inline;
}
<link href='http://fonts.googleapis.com/css?family=Alike' rel='stylesheet' type='text/css'>
<p>

  <button onclick="toggleLiveInput()">use live input</button>

  <!--<button onclick="updatePitch(0);">sample</button>-->
</p>
<canvas id="canvas"></canvas>
<div id="detector" class="vague">
  <div class="pitch"><span id="pitch">--</span>Hz</div>
  <div class="note"><span id="note">--</span></div>
  <canvas id="output" width=300 height=42></canvas>

  <div id="detune"><span id="detune_amt">--</span><span id="flat">cents &#9837;</span><span id="sharp">cents &#9839;</span></div>
</div>
<div id="pit"></div>

</body>

</html>

最佳答案

看起来 updatePitch() 中的这些行有问题:

var arraypitch = [];
arraypitch.push(pitch);

由于每次调用 updatePitch() 时,您都会创建一个新数组 arraypitch。

在函数 updatePitch() 之外声明数组 arrayPitch,并将音调插入 updatePitch() 内部。

也许这会有所帮助!

关于javascript - 值存储在不同的数组中而不是单个数组中,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/43570461/

相关文章:

javascript - 如何在jquery中的for循环中求和?

c - 在c数组中通过引用传递

android - 根据过滤器将一个数组一分为二

arrays - Ruby Hash Values 是数组,需要转换成字符串

javascript - 使用键盘输入时 ToneJS 的性能滞后

javascript - XMLHTTPRequest Level 2 和 CORS 服务器

javascript - 选择浏览器自动完成功能提交表单(不是自定义自动完成功能)

javascript - 使用 Javascript 和 HTML 创建幻灯片放映

javascript - 网络音频 API : Collect all audio informations at "once"

javascript - 使用 AnalyserNode 提高 FFT 分辨率