javascript - How do I save a neural network's weights and biases after training it with JavaScript?

Tags: javascript, neural-network, tensorflow.js

I built a neural network based on the Python code in Michael Nielsen's online book Neural Networks and Deep Learning. I wrote it in JavaScript, using Tensorflow.js in place of NumPy. The network works, but I would like to find a way to save the weights and biases after training. I only use TensorFlow for matrix/vector operations, because I want to follow Nielsen's book to learn how neural networks work. I believe the Layers API provides a way to save a model, but I am trying to do this without relying on Layers. Thanks for your help.

import * as tf from "@tensorflow/tfjs";

export class Network {
  constructor(sizes) {
    this.num_layers = sizes.length;
    this.sizes = sizes;
    this.biases = [];
    for (let i = 1; i < sizes.length; i++) {
      this.biases.push(tf.randomNormal([sizes[i], 1]));
    }
    this.weights = [];
    for (let j = 0; j < sizes.length - 1; j++) {
      this.weights.push(tf.randomNormal([sizes[j + 1], sizes[j]]));
    }
  }
  shuffleArray(array) {
    for (let i = array.length - 1; i > 0; i--) {
      const j = Math.floor(Math.random() * (i + 1));
      [array[i], array[j]] = [array[j], array[i]];
    }
  }
  feedforward(act) {
    let a = act;
    for (let i = 0; i < this.num_layers - 1; i++) {
      a = tf.tidy(() => tf.sigmoid(this.weights[i].dot(a).add(this.biases[i])));
    }
    return a;
  }

  SGD(training_data, epochs, mini_batch_size, eta, test_data = null) {
    let n_test;
    let n = training_data.length;
    if (test_data) n_test = test_data.length;
    for (let j = 0; j < epochs; j++) {
      this.shuffleArray(training_data);
      let mini_batches = [];
      for (let k = 0; k < n; k += mini_batch_size) {
        mini_batches.push(training_data.slice(k, k + mini_batch_size));
      }
      mini_batches.forEach(mb => {
        [this.weights, this.biases] = tf.tidy(() =>
          this.update_mini_batch([...mb], eta)
        );
      });
      if (test_data) {
        console.log(`Epoch ${j}: ${this.evaluate(test_data)} / ${n_test}`);
      } else {
        console.log(`Epoch ${j} complete`);
      }
      console.log("Epoch complete:");
      console.log("Weights:");
      this.weights.forEach(x => x.print());
      console.log("Biases:");
      this.biases.forEach(x => x.print());
    }
  }

  update_mini_batch(mini_batch, eta) {
    //console.log(tf.memory().numTensors);
    let nabla_b = [];
    let nabla_w = [];
    for (let i = 0; i < this.num_layers - 1; i++) {
      nabla_b.push(tf.zeros(this.biases[i].shape));
      nabla_w.push(tf.zeros(this.weights[i].shape));
    }
    let x, y;
    mini_batch.forEach(data => {
      x = data[0];
      y = data[1];
      let delta_nabla_b, delta_nabla_w;
      [delta_nabla_b, delta_nabla_w] = this.backprop(x, y);
      nabla_b = nabla_b.map((nb, i) => {
        return nb.add(delta_nabla_b[i]);
      });
      nabla_w = nabla_w.map((nw, i) => {
        return nw.add(delta_nabla_w[i]);
      });
    });

    let weights = this.weights.map((w, i) => {
      return w.sub(tf.mul(nabla_w[i], eta / mini_batch.length));
    });
    let biases = this.biases.map((b, i) => {
      return b.sub(tf.mul(nabla_b[i], eta / mini_batch.length));
    });
    this.weights.forEach((x, i) => {
      x.dispose();
      this.biases[i].dispose();
    });

    return [weights, biases];
  }
  backprop(x, y) {
    let nabla_b = [];
    let nabla_w = [];
    for (let i = 0; i < this.num_layers - 1; i++) {
      nabla_b.push(tf.zeros(this.biases[i].shape));
      nabla_w.push(tf.zeros(this.weights[i].shape));
    }
    let activation = x;
    let activations = [x];
    let zs = [];
    this.biases.forEach((b, i) => {
      let z = this.weights[i].dot(activation).add(b);
      zs.push(z);
      activation = z.sigmoid();
      activations.push(activation);
    });
    let delta = this.cost_derivative(
      activations[activations.length - 1],
      y
    ).mul(this.sigmoid_prime(zs[zs.length - 1]));
    nabla_b[nabla_b.length - 1] = delta;
    nabla_w[nabla_w.length - 1] = delta.dot(
      activations[activations.length - 2].transpose()
    );
    for (let i = this.num_layers - 2; i > 0; i--) {
      let z = zs[i - 1];
      let sp = this.sigmoid_prime(z);
      delta = this.weights[i]
        .transpose()
        .dot(delta)
        .mul(sp);
      nabla_b[i - 1] = delta;
      nabla_w[i - 1] = delta.dot(activations[i - 1].transpose());

      //sp.dispose();
    }

    return [nabla_b, nabla_w];
  }
  evaluate(test_data) {
    let sum = 0;
    test_data.forEach(data => {
      let x = tf.tidy(() => this.feedforward(data[0]).argMax());
      let y = data[1].argMax();

      let xvalue = x.dataSync()[0];
      let yvalue = y.dataSync()[0];

      if (xvalue === yvalue) {
        sum++;
      }
      x.dispose();
    });
    return sum;
  }
  cost_derivative(output_activations, y) {
    return output_activations.sub(y);
  }
  sigmoid_prime(z) {
    return z.sigmoid().mul(tf.sub(1, z.sigmoid()));
  }
}

Best answer

With the Layers API, you can get a model's weights by calling getWeights on its layers. There are also different ways to save the model: in localStorage, on disk, and so on.
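For comparison, here is a minimal sketch of how that looks with a Layers model; the architecture and the storage key 'my-model' below are only placeholders:

import * as tf from "@tensorflow/tfjs";

async function saveLayersModelDemo() {
  // Placeholder two-layer model, only to illustrate getWeights and save.
  const model = tf.sequential();
  model.add(tf.layers.dense({ inputShape: [784], units: 30, activation: "sigmoid" }));
  model.add(tf.layers.dense({ units: 10, activation: "sigmoid" }));

  // getWeights returns each layer's kernel and bias tensors.
  model.layers.forEach(layer => layer.getWeights().forEach(w => w.print()));

  // model.save accepts URL-like schemes for different storage targets.
  await model.save("localstorage://my-model"); // browser localStorage
  // await model.save("downloads://my-model"); // file download in the browser
}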

Since you are using your own implementation of the network, you can simply save the model's weights with localStorage.

localStorage.setItem('weights', weights)

Then, when loading the model, you can check whether weights have already been stored and, if so, retrieve them.
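Note that localStorage only stores strings, so in practice the tensor values have to be read out (for example with dataSync()) and serialized before being written. Below is a minimal sketch of how that could look for the Network class from the question, assuming it runs in a browser; the storage key 'network' and the helpers saveNetwork/loadNetwork are only illustrative:

import * as tf from "@tensorflow/tfjs";
// Network is the class defined in the question above.

function saveNetwork(net, key = "network") {
  // Flatten each tensor to plain numbers plus its shape so it survives JSON.
  const toPlain = t => ({ shape: t.shape, values: Array.from(t.dataSync()) });
  const state = {
    sizes: net.sizes,
    weights: net.weights.map(toPlain),
    biases: net.biases.map(toPlain),
  };
  localStorage.setItem(key, JSON.stringify(state));
}

function loadNetwork(key = "network") {
  const raw = localStorage.getItem(key);
  if (!raw) return null; // nothing saved yet
  const state = JSON.parse(raw);
  const net = new Network(state.sizes);
  // Replace the randomly initialized tensors with the stored ones.
  net.weights.forEach(w => w.dispose());
  net.biases.forEach(b => b.dispose());
  net.weights = state.weights.map(w => tf.tensor(w.values, w.shape));
  net.biases = state.biases.map(b => tf.tensor(b.values, b.shape));
  return net;
}

After SGD finishes you could call saveNetwork(net), and on start-up try loadNetwork() first, falling back to constructing a fresh Network when it returns null.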

Regarding "javascript - How do I save a neural network's weights and biases after training it with JavaScript?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/56283406/
