python - "0-th value returned by pyfunc_0 is double, but expects float"虽然我认为它返回 float

标签 python python-3.x tensorflow keras

完整的代码和错误可以在这里看到 - https://www.kaggle.com/pradhyo/keras-style-transfer-different-losses/data?scriptVersionId=9726696 (页面中大约有 60% 的错误 - 黑色背景的长部分)

我认为问题是下面的 _loss 似乎是两倍,即使 wd_float() 返回 float。

我尝试从 wd_float() 返回 int 并尝试在 style_loss_wasserstein() 中转换为 int 只是为了看看会发生什么,错误变成了“pyfunc_0 返回的第 0 个值是 int,但期望是 float”。所以我最好的猜测是 tf.py_func 以某种方式将输出从 wd_float 转换为 double。

from scipy.stats import wasserstein_distance
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wasserstein_distance.html
# wasserstein_distance is supposed to return float based on docs
def wd_float(x, y):
    _wd = wasserstein_distance(x, y)
    return float(_wd)


def style_loss_wasserstein():
    ...
    _loss = tf.py_func(wd_float, [style_features, combination_features], tf.float32)
    _loss = K.cast(_loss, dtype='float32')
    print(f"first loss: {_loss}")
    return tf.convert_to_tensor(_loss, tf.float32)


...



from keras import backend as K
from scipy.optimize import fmin_l_bfgs_b


class Evaluator(object):
    ...
    def eval_loss_and_grads(self, x):
        ...
        outs = self.f_outputs([x]) # line causing the error
        loss_value = outs[0]
        ...

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = self.eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value          



outputs = [style_loss_wasserstein]
f_outputs = K.function([combination_image], outputs)
evaluator = Evaluator(f_outputs, img_nrows, img_ncols)



x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                 fprime=evaluator.grads, maxfun=20)

这是堆栈跟踪

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-24-fcc21e1fc999> in <module>()
     13                loss_fn,
     14                iterations,
---> 15                save_every)
     16 
     17 # Display each output iteration for a style

<ipython-input-16-2397e37b5e4f> in style_transfer(base_image_path, style_reference_image_path, result_prefix, loss_fn, iterations, save_every)
     47     for i in range(iterations):
     48         x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
---> 49                                          fprime=evaluator.grads, maxfun=20)
     50         print('Iteration ' + str(i) + ' loss value:', min_val)
     51         # save current generated image

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in fmin_l_bfgs_b(func, x0, fprime, args, approx_grad, bounds, m, factr, pgtol, epsilon, iprint, maxfun, maxiter, disp, callback, maxls)
    197 
    198     res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
--> 199                            **opts)
    200     d = {'grad': res['jac'],
    201          'task': res['message'],

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, **unknown_options)
    333             # until the completion of the current minimization iteration.
    334             # Overwrite f and g:
--> 335             f, g = func_and_grad(x)
    336         elif task_str.startswith(b'NEW_X'):
    337             # new iteration

/opt/conda/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py in func_and_grad(x)
    283     else:
    284         def func_and_grad(x):
--> 285             f = fun(x, *args)
    286             g = jac(x, *args)
    287             return f, g

/opt/conda/lib/python3.6/site-packages/scipy/optimize/optimize.py in function_wrapper(*wrapper_args)
    291     def function_wrapper(*wrapper_args):
    292         ncalls[0] += 1
--> 293         return function(*(wrapper_args + args))
    294 
    295     return ncalls, function_wrapper

<ipython-input-14-687064fde378> in loss(self, x)
     29     def loss(self, x):
     30         assert self.loss_value is None
---> 31         loss_value, grad_values = self.eval_loss_and_grads(x)
     32         self.loss_value = loss_value
     33         self.grad_values = grad_values

<ipython-input-14-687064fde378> in eval_loss_and_grads(self, x)
     19         else:
     20             x = x.reshape((1, self.img_nrows, self.img_ncols, 3))
---> 21         outs = self.f_outputs([x])
     22         loss_value = outs[0]
     23         if len(outs[1:]) == 1:

/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2919                     return self._legacy_call(inputs)
   2920 
-> 2921             return self._call(inputs)
   2922         else:
   2923             if py_any(is_tensor(x) for x in inputs):

/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2877             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2878         else:
-> 2879             fetched = self._callable_fn(*array_vals)
   2880         return fetched[:len(self.outputs)]
   2881 

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    526             None, None,
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive
    530     # as there is a reference to status from this from the traceback due to

InvalidArgumentError: 0-th value returned by pyfunc_0 is double, but expects float
     [[{{node PyFunc}} = PyFunc[Tin=[DT_FLOAT, DT_FLOAT], Tout=[DT_FLOAT], token="pyfunc_0", _device="/job:localhost/replica:0/task:0/device:CPU:0"](Reshape_20/_609, Reshape_21/_611)]]
     [[{{node PyFunc/_613}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device_incarnation=1, tensor_name="edge_103_PyFunc", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]                                     

最佳答案

我今天遇到了类似的问题,但在 TF2 中的自定义损失函数中。

对我来说,解决方案是使用 np.ndarray.astype 函数将数据转换为 np.float32 类型,然后再将其返回到我的自定义 numpy 函数中,例如:

def custom_loss_function_loss2(y_true, y_pred):
   loss = np.random.random(50)
   return np.ndarray.astype(loss, np.float32)

@tf.function
def custom_loss_function(y_true, y_pred):
   loss1 = tf.keras.losses.binary_crossentropy(y_true, y_pred)
   loss2 = tf.numpy_function(custom_loss_function_loss2, [y_true, y_pred], np.float32)
   return loss1 + loss2

希望以后能帮助其他遇到同样错误的人。

关于python - "0-th value returned by pyfunc_0 is double, but expects float"虽然我认为它返回 float,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/54278894/

相关文章:

javascript - 如何从 Odoo 10 中的 JS 函数更新 One2many 字段?

python-3.x - Pydev,如何在异常时暂停

python-3.x - 齿轮 discord.py 中的 @bot.event

python - 扩展 mnist 数据库

tensorflow - 自动编码器的 tensorflow 输入数据字符串

python - PyODBC 从简单的 SELECT 子句获取结果

python - Flask 模板的 {% block page_content %} - 文档

python - "AttributeError: ' float ' object has no attribute ' 替换 '"安装 python 包时出错

Python TensorFlow

python - 我们是否在双向 LSTM 中为每个批处理使用不同的权重?