我正在使用 tf.estimator.Estimator
训练模型:
def model_fn(features, labels, mode, params, config):
    """Estimator model_fn for a one-layer softmax classifier on 784-dim inputs.

    Args:
        features: dict with key "input_image" -> float tensor of shape [batch, 784].
        labels: one-hot label tensor of shape [batch, 10] (None in PREDICT mode).
        mode: a tf.estimator.ModeKeys value.
        params: dict with "learning_rate" and "optimizer" entries.
        config: RunConfig (unused here).

    Returns:
        A tf.estimator.EstimatorSpec for the given mode.
    """
    input_image = features["input_image"]
    eval_metric_ops = {}
    predictions = {}
    # Create model
    with tf.name_scope('Model'):
        W = tf.Variable(tf.zeros([784, 10]), name="W")
        b = tf.Variable(tf.zeros([10]), name="b")
        # BUG FIX: the original applied tf.nn.softmax here and then fed the
        # result into softmax_cross_entropy_with_logits, which applies softmax
        # a second time. Keep the raw (pre-softmax) logits for the loss.
        logits = tf.add(tf.matmul(input_image, W, name="MATMUL"), b, name="logits")
    loss = None
    train_op = None
    if mode != tf.estimator.ModeKeys.PREDICT:
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            # tf.contrib.framework.get_global_step is a deprecated alias.
            global_step=tf.train.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer=params["optimizer"])
    # Add prediction
    classes = tf.as_string(tf.argmax(input=logits, axis=1, name="class"))
    with tf.name_scope('Predictions'):
        # Expose normalized probabilities separately from the raw logits.
        predictions["probabilities"] = tf.nn.softmax(logits, name="probabilities")
        predictions["logits"] = logits
        predictions["classes"] = classes
    # The original assigned export_outputs twice; the ClassificationOutput
    # version was dead code, immediately overwritten. Keep only the one used.
    export_outputs = {"classes": tf.estimator.export.PredictOutput({"labels": classes})}
    spec = tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops,
                                      export_outputs=export_outputs,
                                      training_chief_hooks=None,
                                      training_hooks=None,
                                      scaffold=None)
    return spec
def input_fn(dataset, n=10):
    """Return the first *n* images and labels from *dataset* as a pair."""
    images, labels = dataset.images, dataset.labels
    return images[:n], labels[:n]
# Hyper-parameters handed to model_fn through the Estimator's `params` argument.
model_params = {"learning_rate": 1e-3,
"optimizer": "Adam"}
#run_path = os.path.join(runs_path, datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
# Fixed run directory, wiped below so each run trains from scratch.
run_path = os.path.join(runs_path, "run1")
if os.path.exists(run_path):
    shutil.rmtree(run_path)
# NOTE(review): assumes `runs_path` and `mnist` are defined earlier in the script.
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=run_path, params=model_params)
# Train
# Train on the first 15 training images for 1000 steps.
inputs = lambda: input_fn(mnist.train, n=15)
estimator.train(input_fn=inputs, steps=1000)
模型和权重在训练期间正确保存。
现在我想在另一个脚本中重新加载模型 + 权重以进行预测。
但我不知道如何指定输入,因为我在
model_fn
中没有直接引用 features 参数里的输入张量。
# Get some data to predict
# Take a few test images to run through the restored graph.
input_data = mnist.test.images[:5]
tf.reset_default_graph()
run_path = os.path.join(runs_path, "run1")
# Load the model (graph)
# import_meta_graph rebuilds the saved graph structure from the .meta file.
input_checkpoint = os.path.join(run_path, "model.ckpt-1000")
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
# Restore the weights
sess = tf.InteractiveSession()
saver.restore(sess, input_checkpoint)
graph = sess.graph
# Get the op to compute for prediction
# NOTE(review): this fetches an Operation, not a Tensor; running an Operation
# returns None. A Tensor name such as "class:0" would be needed instead —
# confirm against the actual op names in the saved graph.
predict_op = graph.get_operation_by_name("Predictions/class")
# predictions = sess.run(predict_op, feed_dict=????)
这是返回
graph.get_collection("variables")
的内容:[<tf.Variable 'global_step:0' shape=() dtype=int64_ref>,
<tf.Variable 'Model/W:0' shape=(784, 10) dtype=float32_ref>,
<tf.Variable 'Model/b:0' shape=(10,) dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/learning_rate:0' shape=() dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/beta1_power:0' shape=() dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/beta2_power:0' shape=() dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/Model/W/Adam:0' shape=(784, 10) dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/Model/W/Adam_1:0' shape=(784, 10) dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/Model/b/Adam:0' shape=(10,) dtype=float32_ref>,
<tf.Variable 'OptimizeLoss/Model/b/Adam_1:0' shape=(10,) dtype=float32_ref>]
我是否需要指定
tf.placeholder
输入?但是,Tensorflow 如何知道输入应该提供给这个特定的占位符呢?另外,如果我指定类似
features = tf.constant(features, name="input")
在模型的开头,我也不能使用它,因为它不是张量而是操作。

编辑:
经过更多调查,我发现需要使用
Estimator.export_savedmodel()
方法来保存模型(而不是复用训练期间自动保存的检查点):
feature_spec = {"input_image": tf.placeholder(dtype=tf.float32, shape=[None, 784])}
# Build a serving-input function that feeds raw placeholders directly.
input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
# Write a SavedModel (graph + weights + serving signature) under model_path.
estimator.export_savedmodel(model_path, input_receiver_fn, as_text=True)
然后我尝试加载模型并进行预测,但我不知道如何用我的 numpy 图像输入模型:
preds = sess.run("class", feed_dict={"input_image": input_data})
和异常(exception)的错误:
/home/hadim/local/conda/envs/ws/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
776 try:
777 result = self._run(None, fetches, feed_dict, options_ptr,
--> 778 run_metadata_ptr)
779 if run_metadata:
780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/hadim/local/conda/envs/ws/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
931 except Exception as e:
932 raise TypeError('Cannot interpret feed_dict key as Tensor: '
--> 933 + e.args[0])
934
935 if isinstance(subfeed_val, ops.Tensor):
TypeError: Cannot interpret feed_dict key as Tensor: The name 'input_image' looks like an (invalid) Operation name, not a Tensor. Tensor names must be of the form "<op_name>:<output_index>".
最佳答案
至于TypeError,我是这样解决的。
首先,命名占位符:
feature_spec = {"input_image": tf.placeholder(dtype=tf.float32, shape=[None, 784], name='input_image')}
然后你可以像这样使用它:
feed_dict={"input_image:0": input_data}
希望它可以帮助某人。
编辑
在这个问题中,在
estimator.export_savedmodel(...)
之后你可以做这样的预测:
# Load the exported SavedModel and run one prediction through its signature.
with tf.Session(graph=tf.Graph()) as sess:
    # Attach the serving graph to this fresh session.
    loaded = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], model_path)
    sig = loaded.signature_def['classes']
    # The signature records the concrete tensor names to feed and fetch.
    x = sess.graph.get_tensor_by_name(sig.inputs['input_image'].name)
    y = sess.graph.get_tensor_by_name(sig.outputs['labels'].name)
    predictions = sess.run(y, {x: mnist.test.images[:5]})
关于tensorflow - 在 Tensorflow 中使用 `tf.estimator.Estimator` 保存的模型进行预测,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/43966073/