python - TensorFlow model gets zero loss

Tags: python tensorflow cross-entropy convolutional-neural-network

import tensorflow as tf
import numpy as np
import os
import re
import PIL


def read_image_label_list(img_directory, folder_name):
    # Input:
    #   - Image directory and name of the folder ("test" / "train")
    # Output:
    #   - List of paths of the files in the folder
    #   - Label associated with each file

    cat_label = 1
    dog_label = 0
    filenames = []
    labels = []

    dir_list = os.listdir(os.path.join(img_directory, folder_name))  # List of all image names in 'folder_name' folder

    # Loop through all images in directory
    for i, d in enumerate(dir_list):
        if re.search("train", folder_name):
            if re.search("cat", d):  # If image filename contains 'Cat', then true
                labels.append(cat_label)
            else:
                labels.append(dog_label)
        filenames.append(os.path.join(img_directory, folder_name, d))

    return filenames, labels


# Define convolutional layer
def conv_layer(input, channels_in, channels_out):
    w_1 = tf.get_variable("weight_conv", [5,5, channels_in, channels_out], initializer=tf.contrib.layers.xavier_initializer())
    b_1 = tf.get_variable("bias_conv", [channels_out], initializer=tf.zeros_initializer())
    conv = tf.nn.conv2d(input, w_1, strides=[1,1,1,1], padding="SAME")
    activation = tf.nn.relu(conv + b_1)
    return activation


# Define fully connected layer
def fc_layer(input, channels_in, channels_out):
    w_2 = tf.get_variable("weight_fc", [channels_in, channels_out], initializer=tf.contrib.layers.xavier_initializer())
    b_2 = tf.get_variable("bias_fc", [channels_out], initializer=tf.zeros_initializer())
    activation = tf.nn.relu(tf.matmul(input, w_2) + b_2)
    return activation


# Define parse function to make input data to decode image into
def _parse_function(img_path, label):
    img_file = tf.read_file(img_path)
    img_decoded = tf.image.decode_image(img_file, channels=3)
    img_decoded.set_shape([None,None,3])
    img_decoded = tf.image.resize_images(img_decoded, (28, 28), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    img_decoded = tf.image.per_image_standardization(img_decoded)
    img_decoded = tf.cast(img_decoded, dtype=tf.float32)
    label = tf.one_hot(label, 1)
    return img_decoded, label


tf.reset_default_graph()

# Define parameters
EPOCHS = 10
BATCH_SIZE_training = 64
learning_rate = 0.001
img_dir = 'C:/Users/tharu/PycharmProjects/cat_vs_dog/data'
batch_size = 128

# Define data
features, labels = read_image_label_list(img_dir, "train")

# Define dataset
dataset = tf.data.Dataset.from_tensor_slices((features, labels))  # Takes slices in 0th dimension
dataset = dataset.map(_parse_function)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()

# Get next batch of data from iterator
x, y = iterator.get_next()

# Create the network (use different variable scopes for reuse of variables)
with tf.variable_scope("conv1"):
    conv_1 = conv_layer(x, 3, 32)
    pool_1 = tf.nn.max_pool(conv_1, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")

with tf.variable_scope("conv2"):
    conv_2 = conv_layer(pool_1, 32, 64)
    pool_2 = tf.nn.max_pool(conv_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
    flattened = tf.contrib.layers.flatten(pool_2)

with tf.variable_scope("fc1"):
    fc_1 = fc_layer(flattened, 7*7*64, 1024)
with tf.variable_scope("fc2"):
    logits = fc_layer(fc_1, 1024, 1)


# Define loss function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(y, dtype=tf.int32)))

# Define optimizer
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)


with tf.Session() as sess:
    # Initialize all the variables
    sess.run(tf.global_variables_initializer())

    # Train the network
    for i in range(EPOCHS):
        # Initialize iterator so that it starts at beginning of training set for each epoch
        sess.run(iterator.initializer)
        print("EPOCH", i)
        while True:
            try:
                _, epoch_loss = sess.run([train, loss])

            except tf.errors.OutOfRangeError:  # Error given when out of data
                if i % 2 == 0:
                    # [train_accuracy] = sess.run([accuracy])
                    # print("Step ", i, "training accuracy = %{}".format(train_accuracy))
                    print(epoch_loss)
                break

I've spent hours trying to systematically figure out why I keep getting a loss of 0 when I run this model.

  • Features = list of file locations for each image (e.g. ['\data\train\cat.0.jpg', '\data\train\cat.1.jpg'])
  • Labels = [Batch_size, 1] one_hot vector

At first I thought it was because something was wrong with my data. But I inspected the data after resizing and the images looked fine.

Then I tried a few different loss functions, because I thought I might be misunderstanding what the tensorflow function softmax_cross_entropy does, but that didn't fix anything.

I've tried running just the "logits" part to see what the output is. This is just a small sample, and the numbers look reasonable to me:

 [[0.06388957]
 [0.        ]
 [0.16969752]
 [0.24913025]
 [0.09961276]]

Surely the softmax_cross_entropy function should be able to compute a loss when the corresponding labels are 0 or 1? I'm not sure whether I'm missing something. Any help would be greatly appreciated.

Best Answer

As documented:

logits and labels must have the same shape, e.g. [batch_size, num_classes] and the same dtype (either float16, float32, or float64).

Since you mention that your labels are a "[Batch_size, 1] one_hot vector", I assume both your logits and labels have shape [Batch_size, 1]. That will certainly produce zero loss. Conceptually speaking, you only have one class (num_classes=1), so you cannot be wrong (loss=0).
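A minimal numerical check of this point (TF 1.x, reusing a few of the logit values from your sample together with made-up label indices): the softmax over a single class is always 1.0, so the cross-entropy is 0 no matter what the network outputs.

import tensorflow as tf

logits = tf.constant([[0.06388957], [0.0], [0.16969752]])  # shape [3, 1]
labels = tf.one_hot([1, 0, 1], depth=1)                    # depth-1 one-hot, shape [3, 1]
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

with tf.Session() as sess:
    print(sess.run(loss))  # [0. 0. 0.] -- always zero when there is only one class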

So, at least for your labels, you should convert them with tf.one_hot(indices=labels, depth=num_classes). Your prediction logits should also have shape [batch_size, num_classes].
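A minimal sketch of that fix, assuming num_classes=2 (cat/dog) and made-up logit values; in your model this would correspond to tf.one_hot(label, depth=2) in _parse_function and fc_layer(fc_1, 1024, 2) for the final layer:

import tensorflow as tf

num_classes = 2                                                # assumed: cat / dog
label_indices = tf.constant([1, 0, 1, 1])                      # integer class ids, shape [batch_size]
labels = tf.one_hot(indices=label_indices, depth=num_classes)  # shape [batch_size, num_classes]

logits = tf.constant([[0.2, 0.9],                              # made-up network outputs,
                      [1.1, 0.3],                              # shape [batch_size, num_classes]
                      [0.5, 0.5],
                      [0.0, 2.0]])

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

with tf.Session() as sess:
    print(sess.run(loss))  # non-zero now that there are two competing classes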

Alternatively, you can use sparse_softmax_cross_entropy_with_logits, where:

A common use case is to have logits of shape [batch_size, num_classes] and labels of shape [batch_size]. But higher dimensions are supported.
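A corresponding sketch with the sparse variant (same made-up values as above): the labels stay as integer class indices of shape [batch_size], so no tf.one_hot call is needed at all.

import tensorflow as tf

num_classes = 2
label_indices = tf.constant([1, 0, 1, 1])                      # shape [batch_size]
logits = tf.constant([[0.2, 0.9],
                      [1.1, 0.3],
                      [0.5, 0.5],
                      [0.0, 2.0]])                             # shape [batch_size, num_classes]

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_indices, logits=logits))

with tf.Session() as sess:
    print(sess.run(loss))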

Regarding python - TensorFlow model gets zero loss, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/49931055/
