python - Compiling the TensorFlow example custom op for GPU

Tags: python c++ tensorflow bazel

Following the example provided by TensorFlow, I am running into problems with the custom op they define under GPU kernels. The instructions for building the example list three required files:

The header file

// kernel_example.h
#ifndef KERNEL_EXAMPLE_H_
#define KERNEL_EXAMPLE_H_

template <typename Device, typename T>
struct ExampleFunctor {
  void operator()(const Device& d, int size, const T* in, T* out);
};

#if GOOGLE_CUDA
// Partially specialize functor for GpuDevice.
template <typename T>
struct ExampleFunctor<Eigen::GpuDevice, T> {
  void operator()(const Eigen::GpuDevice& d, int size, const T* in, T* out);
};
#endif

#endif  // KERNEL_EXAMPLE_H_    <-------- [1] commented out

([1] Here I commented out KERNEL_EXAMPLE_H_ on the last line, because as written it causes a compilation error.)

The .cc file

// kernel_example.cc
#include "kernel_example.h"    <--------[2] replaced example.h
#include "tensorflow/core/framework/op_kernel.h"

using namespace tensorflow;

using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;

// CPU specialization of actual computation.
template <typename T>
struct ExampleFunctor<CPUDevice, T> {
  void operator()(const CPUDevice& d, int size, const T* in, T* out) {
    for (int i = 0; i < size; ++i) {
      out[i] = 2 * in[i];
    }
  }
};

// OpKernel definition.
// template parameter <T> is the datatype of the tensors.
template <typename Device, typename T>
class ExampleOp : public OpKernel {
 public:
  explicit ExampleOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    // Grab the input tensor
    const Tensor& input_tensor = context->input(0);

    // Create an output tensor
    Tensor* output_tensor = NULL;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));

    // Do the computation.
    OP_REQUIRES(context, input_tensor.NumElements() <= tensorflow::kint32max,
                errors::InvalidArgument("Too many elements in tensor"));
    ExampleFunctor<Device, T>()(
        context->eigen_device<Device>(),
        static_cast<int>(input_tensor.NumElements()),
        input_tensor.flat<T>().data(),
        output_tensor->flat<T>().data());
  }
};

// Register the CPU kernels.
#define REGISTER_CPU(T)                                          \
  REGISTER_KERNEL_BUILDER(                                       \
      Name("Example").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
      ExampleOp<CPUDevice, T>);
REGISTER_CPU(float);
REGISTER_CPU(int32);

// Register the GPU kernels.
#ifdef GOOGLE_CUDA
#define REGISTER_GPU(T)                                          \
  /* Declare explicit instantiations in kernel_example.cu.cc. */ \
  extern template struct ExampleFunctor<GPUDevice, T>;           \
  REGISTER_KERNEL_BUILDER(                                       \
      Name("Example").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
      ExampleOp<GPUDevice, T>);
REGISTER_GPU(float);
REGISTER_GPU(int32);
#endif  // GOOGLE_CUDA

([2] Here I changed the name of the included header to match the file name.)

The .cu.cc file

// kernel_example.cu.cc
#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "kernel_example.h"    //[3] replaced example.h
#include "tensorflow/core/util/cuda_kernel_helper.h"

using namespace tensorflow;

using GPUDevice = Eigen::GpuDevice;

// Define the CUDA kernel.
template <typename T>
__global__ void ExampleCudaKernel(const int size, const T* in, T* out) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += blockDim.x * gridDim.x) {
    out[i] = 2 * ldg(in + i);
  }
}

// Define the GPU implementation that launches the CUDA kernel.
template <typename T>
void ExampleFunctor<GPUDevice, T>::operator()(
    const GPUDevice& d, int size, const T* in, T* out) {
  // Launch the cuda kernel.
  //
  // See core/util/cuda_kernel_helper.h for example of computing
  // block count and thread_per_block count.
  int block_count = 1024;
  int thread_per_block = 20;
  ExampleCudaKernel<T>
      <<<block_count, thread_per_block, 0, d.stream()>>>(size, in, out);
}

// Explicitly instantiate functors for the types of OpKernels registered.
template struct ExampleFunctor<GPUDevice, float>;
template struct ExampleFunctor<GPUDevice, int32>;

#endif  // GOOGLE_CUDA

([3] Here I changed the name of the included header to match the file name.)

These are the only 3 small changes I made; each is noted below the corresponding file.
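As an aside on the launch configuration hard-coded in kernel_example.cu.cc above: 1024 blocks of 20 threads each runs correctly thanks to the grid-stride loop, but 20 is not a multiple of the warp size (32), so part of every warp is wasted. The comment in that file points at tensorflow/core/util/cuda_kernel_helper.h, which in TensorFlow versions of that era provides a helper for deriving these numbers. A minimal sketch, assuming the GetCudaLaunchConfig/CudaLaunchConfig helpers from that header:

// Launch variant that derives block/thread counts from the problem size.
template <typename T>
void ExampleFunctor<GPUDevice, T>::operator()(
    const GPUDevice& d, int size, const T* in, T* out) {
  // GetCudaLaunchConfig picks block_count and thread_per_block based on
  // `size` and the properties of the device behind `d`.
  CudaLaunchConfig config = GetCudaLaunchConfig(size, d);
  ExampleCudaKernel<T>
      <<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
          size, in, out);
}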

Building the op library with the suggested method:

TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') )
TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') )
g++ -std=c++11 -shared kernel_example.cc kernel_example.cu.cc -o gpu_op.so -fPIC ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -O2

reports success, and gpu_op.so is produced. But importing this op library and trying to use it:

# run_op.py
import tensorflow as tf
import numpy as np
my_module = tf.load_op_library('./gpu_op.so')

a = np.ones((20,5,5))
in1 = tf.convert_to_tensor(a, dtype = float)

print("input1: ", in1)

with tf.Session() as sess:
    ans = sess.run(my_module.example(in1))
print("output:", ans)

fails because the op cannot be found:

  File "run_op.py", line 11, in <module>
    ans = sess.run(my_module.example(in1))
AttributeError: module '33c9073b4d33739023b5757fe9acdd79' has no attribute 'example'
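One detail worth checking at this point: tf.load_op_library only generates a Python wrapper for ops whose interface has been registered with REGISTER_OP, and the wrapper name is the snake_case form of the registered name ("Example" becomes example). The kernel_example.cc shown above registers kernels but contains no REGISTER_OP("Example") call; in the TensorFlow guide that registration accompanies the kernels. A minimal sketch of such a registration, following the guide's pattern (the attribute and input/output names here are assumptions for illustration, not taken from the question):

// Op interface registration; without it, load_op_library exposes no wrapper.
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

REGISTER_OP("Example")
    .Attr("T: numbertype")
    .Input("input: T")
    .Output("output: T")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      // The output has the same shape as the input.
      c->set_output(0, c->input(0));
      return ::tensorflow::Status::OK();
    });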

I am fairly new to C++, so I may not be compiling this correctly. What should I do to make this module importable? And are the 3 changes I made to the code above correct?

Best Answer

It turns out I had overlooked that using the CUDA code in this example requires compiling with NVIDIA's compiler, nvcc.

It can be compiled with:

TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') )
TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') )
nvcc -std=c++11 kernel_example.cc kernel_example.cu.cc -o gpu_op.so -shared -Xcompiler -fPIC ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -O2

(Depending on the TensorFlow version, you may also need to pass -D GOOGLE_CUDA=1, as the guide's own GPU build commands do; without it the #ifdef GOOGLE_CUDA sections are compiled out.)

Regarding python - Compiling the TensorFlow example custom op for GPU, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/52445338/
