java - Neural Network: Backpropagation not working (Java)

Tags: java, neural-network, backpropagation

I have to write an OCR program for a school project, so I started building a backpropagation algorithm with the help of Wikipedia. To train my network I use the MNIST database, which I extracted a few days ago so that I have real image files. But now the error is always around 237, and after training for a while both the error and the weights become NaN. What is wrong with my code?

(A screenshot of my images folder)

This is my main class, which will train my network:

package de.Marcel.NeuralNetwork;

import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;

import javax.imageio.ImageIO;

public class OCR {
    public static void main(String[] args) throws IOException {
        // create network
        NeuralNetwork net = new NeuralNetwork(784, 450, 5, 0.2);

        // load Images
        File file = new File("images");

        int images = 0;
        double error = 0;
        for (File f : file.listFiles()) {
            BufferedImage image = ImageIO.read(f);

            int t = -1;
            double[] pixels = new double[784];
            for (int x = 0; x < image.getWidth(); x++) {
                for (int y = 0; y < image.getHeight(); y++) {
                    t++;
                    Color c = new Color(image.getRGB(x, y));

                    if (c.getRed() == 0 && c.getGreen() == 0 && c.getBlue() == 0) {
                        pixels[t] = 1;
                    } else if (c.getRed() == 255 && c.getGreen() == 255 && c.getBlue() == 255) {
                        pixels[t] = 0;
                    }
                }
            }

            try {
                if (f.getName().startsWith("1")) {
                    net.learn(pixels, new double[] { 1, 0, 0, 0, 0 });
                    error += net.getError();

                    images++;
                } else if (f.getName().startsWith("2")) {
                    net.learn(pixels, new double[] { 0, 1, 0, 0, 0 });
                    error += net.getError();

                    images++;
                } else if (f.getName().startsWith("3")) {
                    net.learn(pixels, new double[] { 0, 0, 1, 0, 0 });
                    error += net.getError();

                    images++;
                } else if (f.getName().startsWith("4")) {
                    net.learn(pixels, new double[] { 0, 0, 0, 1, 0 });
                    error += net.getError();

                    images++;
                } else if (f.getName().startsWith("5")) {
                    net.learn(pixels, new double[] { 0, 0, 0, 0, 1 });
                    error += net.getError();

                    images++;
                } else if (f.getName().startsWith("6")) {
                    break;
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        error = error / images; // average error over all trained images

        System.out.println("Trained images: " + images);
        System.out.println("Error: " + error);

        // save
        System.out.println("Save");
        try {
            net.saveNetwork("network.nnet");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

... and this is my Neuron class:

package de.Marcel.NeuralNetwork;

public class Neuron {
    private double input, output;

    public Neuron() {

    }

    public void setInput(double input) {
        this.input = input;
    }

    public void setOutput(double output) {
        this.output = output;
    }

    public double getInput() {
        return input;
    }

    public double getOutput() {
        return output;
    }

}

... and finally my NeuralNetwork class:

package de.Marcel.NeuralNetwork;

import java.io.File;
import java.io.FileWriter;
import java.util.Random;

public class NeuralNetwork {
    private Neuron[] inputNeurons, hiddenNeurons, outputNeurons;
    private double[] weightMatrix1, weightMatrix2;
    private double learningRate, error;

    public NeuralNetwork(int inputCount, int hiddenCount, int outputCount, double learningRate) {
        this.learningRate = learningRate;

        // create Neurons
        // create Input
        this.inputNeurons = new Neuron[inputCount];
        for (int i = 0; i < inputCount; i++) {
            this.inputNeurons[i] = new Neuron();
        }
        // createHidden
        this.hiddenNeurons = new Neuron[hiddenCount];
        for (int i = 0; i < hiddenCount; i++) {
            this.hiddenNeurons[i] = new Neuron();
        }
        // createOutput
        this.outputNeurons = new Neuron[outputCount];
        for (int i = 0; i < outputCount; i++) {
            this.outputNeurons[i] = new Neuron();
        }

        // create weights
        Random random = new Random();
        // weightMatrix1
        this.weightMatrix1 = new double[inputCount * hiddenCount];
        for (int i = 0; i < inputCount * hiddenCount; i++) {
            this.weightMatrix1[i] = (random.nextDouble() * 2 - 1) / 0.25;
        }
        // weightMatrix2
        this.weightMatrix2 = new double[hiddenCount * outputCount];
        for (int i = 0; i < hiddenCount * outputCount; i++) {
            this.weightMatrix2[i] = (random.nextDouble() * 2 - 1) / 0.25;
        }
    }

    public void calculate(double[] input) throws Exception {
        // verify input length
        if (input.length == inputNeurons.length) {
            // forwardPropagation
            // set input array as input and output of input neurons
            for (int i = 0; i < input.length; i++) {
                inputNeurons[i].setInput(input[i]);
                inputNeurons[i].setOutput(input[i]);
            }

            // calculate output of hiddenNeurons
            for (int h = 0; h < hiddenNeurons.length; h++) {
                Neuron hNeuron = hiddenNeurons[h];
                double totalInput = 0;

                // sum up totalInput of Neuron
                for (int i = 0; i < inputNeurons.length; i++) {
                    Neuron iNeuron = inputNeurons[i];
                    totalInput += iNeuron.getOutput() * weightMatrix1[h * inputNeurons.length + i];
                }

                // set input
                hNeuron.setInput(totalInput);

                // calculate output by applying sigmoid
                double calculatedOutput = sigmoid(totalInput);

                // set output
                hNeuron.setOutput(calculatedOutput);
            }

            // calculate output of outputNeurons
            for (int o = 0; o < outputNeurons.length; o++) {
                Neuron oNeuron = outputNeurons[o];
                double totalInput = 0;

                // sum up totalInput of Neuron
                for (int h = 0; h < hiddenNeurons.length; h++) {
                    Neuron hNeuron = hiddenNeurons[h];
                    totalInput += hNeuron.getOutput() * weightMatrix2[o * hiddenNeurons.length + h];
                }

                // set input
                oNeuron.setInput(totalInput);

                // calculate output by applying sigmoid
                double calculatedOutput = sigmoid(totalInput);

                // set output
                oNeuron.setOutput(calculatedOutput);
            }
        } else {
            throw new Exception("[NeuralNetwork] input array is either too small or too big");
        }
    }

    public void learn(double[] input, double[] output) throws Exception {
        double partialOutput = 0;

        // verify input length
        if (input.length == inputNeurons.length) {
            // forwardPropagation
            // set input array as input and output of input neurons
            for (int i = 0; i < input.length; i++) {
                inputNeurons[i].setInput(input[i]);
                inputNeurons[i].setOutput(input[i]);
            }

            // calculate output of hiddenNeurons
            for (int h = 0; h < hiddenNeurons.length; h++) {
                Neuron hNeuron = hiddenNeurons[h];
                double totalInput = 0;

                // sum up totalInput of Neuron
                for (int i = 0; i < inputNeurons.length; i++) {
                    Neuron iNeuron = inputNeurons[i];
                    totalInput += iNeuron.getOutput() * weightMatrix1[h * inputNeurons.length + i];
                }

                // set input
                hNeuron.setInput(totalInput);

                // calculate output by applying sigmoid
                double calculatedOutput = sigmoid(totalInput);

                // set output
                hNeuron.setOutput(calculatedOutput);
            }

            // calculate output of outputNeurons
            for (int o = 0; o < outputNeurons.length; o++) {
                Neuron oNeuron = outputNeurons[o];
                double totalInput = 0;

                // sum up totalInput of Neuron
                for (int h = 0; h < hiddenNeurons.length; h++) {
                    Neuron hNeuron = hiddenNeurons[h];
                    totalInput += hNeuron.getOutput() * weightMatrix2[o * hiddenNeurons.length + h];
                }

                // set input
                oNeuron.setInput(totalInput);

                // calculate output by applying sigmoid
                double calculatedOutput = sigmoid(totalInput);

                // set output
                oNeuron.setOutput(calculatedOutput);
            }

            // backPropagation
            double totalError = 0;
            // calculate weights in matrix2
            for (int h = 0; h < hiddenNeurons.length; h++) {
                Neuron hNeuron = hiddenNeurons[h];

                for (int o = 0; o < outputNeurons.length; o++) {
                    Neuron oNeuron = outputNeurons[o];

                    // calculate weight
                    double delta = learningRate * derivativeSigmoid(oNeuron.getInput())
                            * (output[o] - oNeuron.getOutput()) * hNeuron.getOutput();

                    // set new weight
                    weightMatrix2[h + o * hiddenNeurons.length] = weightMatrix2[h + o * hiddenNeurons.length] + delta;

                    // update partial output
                    partialOutput += (derivativeSigmoid(oNeuron.getInput()) * (output[o] - oNeuron.getOutput())
                            * weightMatrix2[h + o * hiddenNeurons.length]);

                    // calculate error
                    totalError += Math.pow((output[o] - oNeuron.getOutput()), 2);
                }
            }

            // set error
            this.error = 0.5 * totalError;

            // calculate weights in matrix1
            for (int i = 0; i < inputNeurons.length; i++) {
                Neuron iNeuron = inputNeurons[i];

                for (int h = 0; h < hiddenNeurons.length; h++) {
                    Neuron hNeuron = hiddenNeurons[h];

                    // calculate weight
                    double delta = learningRate * derivativeSigmoid(hNeuron.getInput()) * partialOutput
                            * (iNeuron.getOutput());

                    // set new weight
                    weightMatrix1[i + h * inputNeurons.length] = weightMatrix1[i + h * inputNeurons.length] + delta;
                }
            }
        } else {
            throw new Exception("[NeuralNetwork] input array is either too small or too big");
        }
    }

    // save Network
    public void saveNetwork(String fileName) throws Exception {
        File file = new File(fileName);
        FileWriter writer = new FileWriter(file);

        writer.write("weightmatrix1:");
        writer.write(System.lineSeparator());

        // write weightMatrix1
        for (double d : weightMatrix1) {
            writer.write(d + "-");
        }

        writer.write(System.lineSeparator());
        writer.write("weightmatrix2:");
        writer.write(System.lineSeparator());

        // write weightMatrix2
        for (double d : weightMatrix2) {
            writer.write(d + "-");
        }

        // save
        writer.close();
    }

    // sigmoid function
    private double sigmoid(double input) {
        return Math.exp(input * (-1));
    }

    private double derivativeSigmoid(double input) {
        return sigmoid(input) * (1 - sigmoid(input));
    }

    public double getError() {
        return error;
    }
}

Best answer

It looks like your sigmoid function is incorrect. It should be 1/(1 + exp(-x)).
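
For reference, a minimal sketch of what the corrected method in the NeuralNetwork class above could look like; the existing derivativeSigmoid is already written in terms of sigmoid(), so it becomes consistent once sigmoid() itself is fixed:

// standard logistic function: 1 / (1 + e^(-x))
private double sigmoid(double input) {
    return 1.0 / (1.0 + Math.exp(-input));
}

// derivative of the sigmoid, expressed through sigmoid() itself (unchanged from the question)
private double derivativeSigmoid(double input) {
    return sigmoid(input) * (1 - sigmoid(input));
}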

If you are still getting NaN errors, it may be because evaluating the function directly can be overkill, especially for numbers of large magnitude (i.e. values less than -10 or greater than 10).

Using an array of precomputed sigmoid(x) values may avoid this problem for larger data sets and also helps the program run more efficiently.
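
A minimal sketch of such a lookup table, assuming a fixed input range of [-10, 10] and rounding to the nearest precomputed sample (the class and field names are illustrative, not part of the original code):

public class SigmoidTable {
    private static final int SAMPLES = 2000;  // number of precomputed points
    private static final double RANGE = 10.0; // table covers [-RANGE, RANGE]
    private final double[] table = new double[SAMPLES + 1];

    public SigmoidTable() {
        for (int i = 0; i <= SAMPLES; i++) {
            double x = -RANGE + (2 * RANGE * i) / SAMPLES;
            table[i] = 1.0 / (1.0 + Math.exp(-x));
        }
    }

    // values outside the range saturate to 0 or 1, which also avoids extreme exp() calls
    public double sigmoid(double x) {
        if (x <= -RANGE) return 0.0;
        if (x >= RANGE) return 1.0;
        int index = (int) Math.round((x + RANGE) / (2 * RANGE) * SAMPLES);
        return table[index];
    }
}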

Hope this helps!

Regarding java - Neural Network: Backpropagation not working (Java), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/41778473/
