Deeplearning4j LSTM output size

Tags: deeplearning4j

My situation: as input I have a List<List<Float>> (a list of word-representation vectors), and at the output of each sequence there is a single Double.

So I build the following structure (first index: example number, second: word number within the sentence, third: word-vector element number): http://pastebin.com/KGdjwnki

Output: http://pastebin.com/fY8zrxEL

But when I pass one of the following ( http://pastebin.com/wvFFC4Hw ) to model.output, I get the vector [0.25, 0.24, 0.25, 0.25] rather than a single value.

What could be wrong? The code is attached below (in Kotlin); classCount is one.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.deeplearning4j.nn.conf.NeuralNetConfiguration.Builder
import org.deeplearning4j.nn.api.OptimizationAlgorithm
import org.deeplearning4j.nn.conf.Updater
import org.deeplearning4j.nn.weights.WeightInit
import org.deeplearning4j.nn.conf.layers.GravesLSTM
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer
import org.deeplearning4j.nn.conf.BackpropType
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.cpu.nativecpu.NDArray
import org.nd4j.linalg.indexing.NDArrayIndex
import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.lossfunctions.LossFunctions
import java.util.*

class ClassifierNetwork(wordVectorSize: Int, classCount: Int) {
    data class Dimension(val x: Array<Int>, val y: Array<Int>)
    val model: MultiLayerNetwork
    val optimization = OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT
    val iterations = 1
    val learningRate = 0.1
    val rmsDecay = 0.95
    val seed = 12345
    val l2 = 0.001
    val weightInit = WeightInit.XAVIER
    val updater = Updater.RMSPROP
    val backpropType = BackpropType.TruncatedBPTT
    val tbpttLength = 50
    val epochs = 50
    var dimensions = Dimension(intArrayOf(0).toTypedArray(), intArrayOf(0).toTypedArray())

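    // three stacked GravesLSTM layers followed by a softmax RnnOutputLayer, trained with truncated BPTT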
    init {
        val baseConfiguration = Builder().optimizationAlgo(optimization)
                .iterations(iterations).learningRate(learningRate).rmsDecay(rmsDecay).seed(seed).regularization(true).l2(l2)
                .weightInit(weightInit).updater(updater)
                .list()
        baseConfiguration.layer(0, GravesLSTM.Builder().nIn(wordVectorSize).nOut(64).activation("tanh").build())
        baseConfiguration.layer(1, GravesLSTM.Builder().nIn(64).nOut(32).activation("tanh").build())
        baseConfiguration.layer(2, GravesLSTM.Builder().nIn(32).nOut(16).activation("tanh").build())
        baseConfiguration.layer(3, RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                .activation("softmax").weightInit(WeightInit.XAVIER).nIn(16).nOut(classCount).build())
        val cfg = baseConfiguration.build()!!
        cfg.backpropType = backpropType
        cfg.tbpttBackLength = tbpttLength
        cfg.tbpttFwdLength = tbpttLength
        cfg.isPretrain = false
        cfg.isBackprop = true
        model = MultiLayerNetwork(cfg)
    }

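    // derives (exampleCount, wordVectorLength, sentenceLength) and (exampleCount, classCount) from the raw lists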
    private fun dataDimensions(x: List<List<Array<Double>>>, y: List<Array<Double>>): Dimension {
        assert(x.size == y.size)
        val exampleCount = x.size
        assert(x.size > 0)
        val sentenceLength = x[0].size
        assert(sentenceLength > 0)
        val wordVectorLength = x[0][0].size
        assert(wordVectorLength > 0)
        val classCount = y[0].size
        assert(classCount > 0)
        return Dimension(
                intArrayOf(exampleCount, wordVectorLength, sentenceLength).toTypedArray(),
                intArrayOf(exampleCount, classCount).toTypedArray()
        )
    }

    data class Fits(val x: INDArray, val y: INDArray)
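    // converts the nested Kotlin lists into ND4J arrays for fitting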
    private fun fitConversion(x: List<List<Array<Double>>>, y: List<Array<Double>>): Fits {
        val dim = dataDimensions(x, y)
        val xItems = ArrayList<INDArray>()
        for (i in 0..dim.x[0]-1) {
            val itemList = ArrayList<DoubleArray>()
            for (j in 0..dim.x[1]-1) {
                val rowList = ArrayList<Double>()
                for (k in 0..dim.x[2]-1) {
                    rowList.add(x[i][k][j])
                }
                itemList.add(rowList.toTypedArray().toDoubleArray())
            }
            xItems.add(Nd4j.create(itemList.toTypedArray()))
        }
        val xFits = Nd4j.create(xItems, dim.x.toIntArray(), 'c')
        val yItems = ArrayList<DoubleArray>()
        for (i in 0..y.size-1) {
            yItems.add(y[i].toDoubleArray())
        }
        val yFits = Nd4j.create(yItems.toTypedArray())
        return Fits(xFits, yFits)
    }

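    // prints the overall Euclidean distance between expected and predicted outputs for the given epoch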
    private fun error(epoch: Int, x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        var totalDiff = 0.0
        for (i in 0..x.size-1) {
            val source = x[i]
            val result = y[i]
            val realResult = predict(source)
            var diff = 0.0
            for (j in 0..result.size-1) {
                val elementDiff = result[j] - realResult[j]
                diff += Math.pow(elementDiff, 2.0)
            }
            diff = Math.sqrt(diff)
            totalDiff += Math.pow(diff, 2.0)
        }
        totalDiff = Math.sqrt(totalDiff)
        print("Epoch ")
        print(epoch)
        print(", diff ")
        println(totalDiff)
    }

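    // fits the network for the configured number of epochs, reporting the error after each one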
    fun train(x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        dimensions = dataDimensions(x, y)
        val(xFit, yFit) = fitConversion(x, y)
        for (i in 0..epochs-1) {
            model.input = xFit
            model.labels = yFit
            model.fit()
            error(i+1, x, y)
        }
    }

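    // runs a single sentence (list of word vectors) through the network and returns its raw output values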
    fun predict(x: List<Array<Double>>): Array<Double> {
        val xList = ArrayList<DoubleArray>()
        for (i in 0..dimensions.x[1]-1) {
            val row = ArrayList<Double>()
            for (j in 0..dimensions.x[2]-1) {
                row.add(x[j][i])
            }
            xList.add(row.toDoubleArray())
        }
        val xItem = Nd4j.create(xList.toTypedArray())
        val y = model.output(xItem)
        // collect the network's output values into a plain Array<Double>
        val result = ArrayList<Double>()
        for (i in 0..y.length() - 1) {
            result.add(y.getDouble(i))
        }
        return result.toTypedArray()
    }
}

Update: it looks like the following example handles a task close to mine, so I will look into it and post a solution later: https://github.com/deeplearning4j/dl4j-0.4-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/word2vecsentiment/Word2VecSentimentRNN.java

Best answer

LSTM input/output can only be rank 3: see http://deeplearning4j.org/usingrnns
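For illustration, here is a minimal Kotlin sketch of that rank-3 layout. The sizes below are made up; the point is the [miniBatchSize, inputSize, timeSeriesLength] convention described on the linked page, and a sequence-to-one setup would additionally need a label mask:

import java.util.Arrays
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j

fun main(args: Array<String>) {
    // illustrative sizes only; the shapes, not the numbers, are the point
    val examples = 2
    val wordVectorSize = 3
    val sentenceLength = 4
    val classCount = 2

    // features for a DL4J RNN are rank 3: [miniBatchSize, inputSize, timeSeriesLength]
    val features: INDArray = Nd4j.zeros(examples, wordVectorSize, sentenceLength)
    // labels for an RnnOutputLayer are rank 3 as well: [miniBatchSize, classCount, timeSeriesLength]
    val labels: INDArray = Nd4j.zeros(examples, classCount, sentenceLength)

    for (i in 0..examples - 1) {
        for (t in 0..sentenceLength - 1) {
            for (v in 0..wordVectorSize - 1) {
                // the real word-vector element for word t of example i would go here
                features.putScalar(intArrayOf(i, v, t), 0.0)
            }
        }
        // one-hot class label at the last time step; a sequence-to-one setup
        // would normally pair this with a label mask (see the linked page)
        labels.putScalar(intArrayOf(i, 0, sentenceLength - 1), 1.0)
    }

    // the network output for such input is also rank 3, [examples, classCount, sentenceLength],
    // so the per-sequence prediction is read from the last time step
    println("features shape: " + Arrays.toString(features.shape()))
    println("labels shape:   " + Arrays.toString(labels.shape()))
}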

A similar question about the Deeplearning4j LSTM output size can be found on Stack Overflow: https://stackoverflow.com/questions/38418859/
