考虑以下 TensorFlow 代码:
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
# Load MNIST with its metadata; as_supervised=True yields (image, label) pairs.
mnist_dataset, mnist_info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = mnist_dataset['train'], mnist_dataset['test']

# Reserve 10% of the training split for validation. The counts are cast to
# int64 because Dataset.take/skip/batch expect integer tensors, not floats.
num_validation_samples = tf.cast(0.1 * mnist_info.splits['train'].num_examples, tf.int64)
num_test_samples = tf.cast(mnist_info.splits['test'].num_examples, tf.int64)
def scale(image, label):
    """Normalize a uint8 image to float32 in [0, 1]; pass the label through unchanged."""
    return tf.cast(image, tf.float32) / 255., label
# Normalize pixels in both splits, then shuffle the training pool so the
# validation split drawn below is random.
BUFFER_SIZE = 10_000
scaled_train_and_validation_data = mnist_train.map(scale)
test_data = mnist_test.map(scale)
shuffled_train_and_validation_data = scaled_train_and_validation_data.shuffle(BUFFER_SIZE)

# First 10% of the shuffled pool -> validation; the remainder -> training.
validation_data = shuffled_train_and_validation_data.take(num_validation_samples)
train_data = shuffled_train_and_validation_data.skip(num_validation_samples)

BATCH_SIZE = 100
train_data = train_data.batch(BATCH_SIZE)
# Validation and test are each evaluated as one full-size batch.
validation_data = validation_data.batch(num_validation_samples)
test_data = test_data.batch(num_test_samples)

# Materialize that single validation batch as (inputs, targets) tensors.
validation_inputs, validation_targets = next(iter(validation_data))
input_size = 784 # One for each pixel of the 28 * 28 image (unused below: Flatten infers the shape)
output_size = 10 # Ten digit classes
hidden_layer_size = 50 # Arbitrarily chosen
# Simple feed-forward classifier: flatten the image, two ReLU hidden layers,
# softmax output over the ten digit classes.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28,28,1)),  # (28, 28, 1) image -> 784-vector
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'), # First hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # Second hidden layer
    tf.keras.layers.Dense(output_size, activation='softmax')  # Class probabilities
])
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
NUM_EPOCHS = 5
# NOTE(review): validation_data here is a tuple of tensors (not a Dataset), so
# Keras cannot infer how much validation data to consume -- this is the call
# that raises the ValueError discussed below; it needs `validation_steps`.
model.fit(train_data, epochs = NUM_EPOCHS, validation_data=(validation_inputs, validation_targets), verbose=2)
运行它时 tf 给出错误:
ValueError: `batch_size` or `steps` is required for `Tensor` or `NumPy` input data.
当在 fit() 的调用中添加 batch_size 时:
model.fit(train_data, batch_size = BATCH_SIZE, epochs = NUM_EPOCHS, validation_data=(validation_inputs, validation_targets), verbose=2)
然后它提示:
ValueError: The `batch_size` argument must not be specified for the given input type. Received input: &lt;BatchDataset ...&gt;, batch_size: 100
这里有什么错误?
最佳答案
错误发生是因为传给 Model.fit 的 validation_data 参数是一个张量元组(而不是 tf.Dataset),Keras 不知道要在多少步验证数据上进行验证。要解决此问题,只需设置参数 validation_steps 即可。例如:
# `batch_size` must NOT be passed when the training input is a tf.data.Dataset
# (the dataset is already batched) -- passing it triggers the second ValueError
# quoted above. Only `validation_steps` is needed, since validation_data is a
# tuple of tensors whose consumption length Keras cannot infer.
model.fit(train_data,
          epochs=NUM_EPOCHS,
          validation_data=(validation_inputs, validation_targets),
          validation_steps=10)
关于python - Tensorflow: `batch_size` 或 `steps` 输入数据需要 `Tensor` 或 `NumPy`,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/58752538/