python - VGG16 Keras fine-tuning: low accuracy

Tags: python neural-network deep-learning classification keras

I have already asked a similar question here, but now my question is slightly different, so I'm asking a new one.

I decided to use a slightly different approach from the one proposed in the answers to the referenced question: first train, and then fine-tune, the model.

Update: I've replaced the old question provided here with a more suitable version.

Here is my sequence of operations:

  1. Build the VGG16 model and drop the top layers (call it the "no-top" model)
  2. Generate bottleneck features with the no-top model
  3. Train a separate fully connected model on the bottleneck features
  4. Build a new VGG16 model, drop its top layers, and attach the pretrained top model
  5. Train the concatenated model on the dogs/cats data

Here is the code I use to implement the sequence of operations described above:

from __future__ import print_function

import warnings
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', DeprecationWarning)

from itertools import izip_longest as zip_longest
from pprint import pformat as pf
from pprint import pprint as pp
import os

from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Dropout, Flatten, Dense, InputLayer, Lambda
from keras.models import Sequential, Model, load_model
from keras.utils.data_utils import get_file
from keras.optimizers import SGD
import keras.backend as K

import numpy as np


RANDOM_STATE = 1
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
BATCH_SIZE = 4
# ImageNet per-channel means, shaped to broadcast over channels-first images
VGG_MEAN = np.array([123.68, 116.779, 103.939]).reshape((3, 1, 1))
VGG16_WEIGHTS_PATH = 'http://www.platform.ai/models/vgg16.h5'
DATA_ROOT = os.path.join(os.path.expanduser('~'), 'data', 'dogscats')
TRAIN_DIR = os.path.join(DATA_ROOT, 'train')
VALID_DIR = os.path.join(DATA_ROOT, 'valid')
SAMPLES_DIR = os.path.expanduser('~/dogscats_samples')


np.random.seed(RANDOM_STATE)
K.set_image_dim_ordering('th')  # Theano-style (channels, height, width) ordering


def get_batches(dirname, gen=ImageDataGenerator(), shuffle=True, 
                batch_size=BATCH_SIZE, class_mode='categorical'):
    return gen.flow_from_directory(
        os.path.join(SAMPLES_DIR, dirname),
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode=class_mode,
        shuffle=shuffle,
        batch_size=batch_size)

def vgg_preprocess(x):
    # subtract the ImageNet means and reverse the channel order from RGB to BGR,
    # matching the preprocessing used when the original VGG16 weights were trained
    x = x - VGG_MEAN
    return x[:, ::-1]

def conv_block(model, n_layers, n_filters, name='block'):
    for i in range(n_layers):
        model.add(ZeroPadding2D((1, 1), name='%s_padding_%s' % (name, i)))
        model.add(Conv2D(n_filters, (3, 3), activation='relu', name='%s_conv2d_%s' % (name, i)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='%s_maxpool' % name))

def fc_block(model, name='block'):
    model.add(Dense(4096, activation='relu', name=name + '_dense'))
    model.add(Dropout(0.5))

def build_vgg_16():
    model = Sequential()
    input_shape = (3, IMAGE_WIDTH, IMAGE_HEIGHT) 
    model.add(InputLayer(input_shape=input_shape))
    model.add(Lambda(vgg_preprocess))
    conv_block(model, n_layers=2, n_filters=64, name='block1')
    conv_block(model, n_layers=2, n_filters=128, name='block2')
    conv_block(model, n_layers=3, n_filters=256, name='block3')
    conv_block(model, n_layers=3, n_filters=512, name='block4')
    conv_block(model, n_layers=3, n_filters=512, name='block5')
    model.add(Flatten())
    fc_block(model)
    fc_block(model)
    model.add(Dense(1000, activation='softmax'))
    return model

def train_finetuned_model():
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')
    print('Building VGG16 (no-top) model to generate bottleneck features')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    # drop the classifier head: Dense(1000), two Dense(4096)/Dropout pairs, and Flatten
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    train_batches = get_batches('train', shuffle=False, class_mode=None)
    train_labels = np.array([0]*1000 + [1]*1000)  # relies on shuffle=False and alphabetical class folders
    bottleneck_train = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)

    valid_batches = get_batches('valid', shuffle=False, class_mode=None)
    valid_labels = np.array([0]*400 + [1]*400)
    bottleneck_valid = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)

    print('Training top model on bottleneck features')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    top_model.fit(bottleneck_train, train_labels, 
                  batch_size=32, epochs=50, 
                  validation_data=(bottleneck_valid, valid_labels))

    print('Concatenate new VGG16 (without top layer) with pretrained top model')
    vgg16_fine = build_vgg_16()    
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))    
    vgg16_fine.add(Dense(256, activation='relu', name='top_dense'))
    vgg16_fine.add(Dropout(0.5, name='top_dropout'))
    vgg16_fine.add(Dense(1, activation='sigmoid', name='top_sigmoid'))
    # copy the pretrained top-model weights into the matching new layers,
    # walking both layer stacks from the end backwards
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)
    for layer in vgg16_fine.layers[:26]:  # freeze everything below the new top
        layer.trainable = False
    vgg16_fine.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    print('Train concatenated model on dogs/cats dataset sample')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='binary')
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='binary')
    vgg16_fine.fit_generator(train_batches,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             epochs=50,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine


final_model = train_finetuned_model()

But the problem is that the model's accuracy decreased dramatically: after 50 epochs it is around 50%. So probably I'm doing something wrong.

Maybe something is wrong with the parameters, like the learning rate or the batch size?

Best Answer

Your fully connected layers look completely different from the original VGG architecture.

# yours
Flatten()
Dense(256, activation='relu')
Dense(1, activation='sigmoid')

# original
Flatten()
Dense(4096, activation='relu')
Dense(4096, activation='relu')
Dense(2, activation='softmax')

Two points:

  1. The last layer should be a 2-class softmax instead of a sigmoid. The accuracy is probably not computed the way you expect when you use a sigmoid here. (A sketch of both fixes follows after this list.)

  2. The complexity (the number of neurons and layers) seems too low.
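
A minimal sketch of a top model with both points applied, reusing the bottleneck arrays and labels from the question's script (the layer widths follow the original VGG16 classifier, but treat them as an assumption to tune, not code from the answer):

from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout
from keras.utils.np_utils import to_categorical

top_model = Sequential()
top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
# wider head, closer to the original VGG16 classifier
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
# 2-class softmax output instead of a single sigmoid unit
top_model.add(Dense(2, activation='softmax'))

# softmax pairs with categorical_crossentropy and one-hot labels
top_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
top_model.fit(bottleneck_train, to_categorical(train_labels),
              batch_size=32, epochs=50,
              validation_data=(bottleneck_valid, to_categorical(valid_labels)))

With this change, the fine-tuning generators would also need class_mode='categorical' (instead of 'binary') so the generated labels match the two-class softmax output.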

Regarding "python - VGG16 Keras fine-tuning: low accuracy", the original question and answer are on Stack Overflow: https://stackoverflow.com/questions/43965581/
