c++ - 使用自定义内核或 CUBLAS 对 vector 张量积进行 CUDA 优化

标签 c++ cuda gpu

我有两个 vector:a 和 b。每个 vector 的元素是一个 3D 点,包含坐标 x、y、z(类型为 Vector3f)。

// A 3D point / vector with single-precision components.
// NOTE(review): the original snippet was missing the trailing ';' after the
// closing brace, which is required by C/C++ -- without it nothing after the
// struct compiles.
struct Vector3f
{
    float x;  // x coordinate
    float y;  // y coordinate
    float z;  // z coordinate
};

vector a 的大小为 n = 5000 个点,vector b 的大小为 m = 4000 个点。我需要在它们之间做张量积(即每一对点做点积):结果 vector 的长度应为 5000 * 4000,元素类型为 float,结果存储在 c 中。

// One thread per row of the n x m output: thread `row` computes the dot
// product of a[row] against every element of b, writing the row-major result
// c[row*m + col]. Launch with a 1D grid covering at least n threads.
__global__ void tensor3dProdcutClassic(const int n, const int m, const Vector3f *a, const Vector3f *b, float *c) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;

    // Grid tail guard: the block count is rounded up, so trailing threads
    // may fall outside [0, n).
    if (row >= n)
        return;

    // Hoist this thread's left-hand point into registers once.
    const Vector3f av = a[row];
    float *rowOut = c + row * m;

    for (int col = 0; col < m; ++col) {
        const Vector3f bv = b[col];
        rowOut[col] = av.x * bv.x + av.y * bv.y + av.z * bv.z;
    }
}

// 1D launch: one thread per point of a; each thread loops over all of b.
dim3 blockSize(32, 1, 1);
// Ceil-division so a partial final block still covers the tail of a.
dim3 gridSize((n + blockSize.x - 1) / blockSize.x, 1, 1);

// x, y, out are presumably device pointers (Vector3f*, Vector3f*, float*)
// allocated/copied elsewhere -- TODO confirm. The launch is asynchronous and
// returns no status; check cudaGetLastError() afterwards.
tensor3dProdcutClassic<<<gridSize, blockSize>>>(n, m, x, y, out);

我在 Volta arch 上的执行时间很长。
我的问题是如何优化内核以减少执行时间——瓶颈主要在内核中的 for 循环。据我所知,这里所有的全局内存读写都没有合并(coalesced)。

最佳答案

您可以让内核用 2D 线程网格同时遍历 a 和 b,如下所示:

// 2D version: one thread per output element. Thread (i, j) computes
// c[i*m + j] = dot(a[i], b[j]) (row-major). Launch with a 2D grid/block
// covering at least n x m threads (e.g. dim3(32, 32) blocks).
__global__ void tensor3dProdcutClassic(const int n, const int m, const Vector3f *a, const Vector3f *b, float *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original read "blockIdy.y" -- no such built-in variable
    // exists, so the snippet did not compile. The y coordinate of the block
    // index comes from blockIdx.y.
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Grid tail guard: block counts are rounded up, so some threads fall
    // outside the n x m domain.
    if (i < n && j < m)
    {
        int idx = j + m * i;
        c[idx] = a[i].x * b[j].x + a[i].y * b[j].y + a[i].z * b[j].z;
    }
}

// 2D launch: 32x32 = 1024 threads per block (the per-block maximum on
// current GPUs), one thread per output element.
dim3 blockSize(32, 32);
// Ceil-division in each dimension so partial edge blocks cover the tails.
dim3 gridSize((int)ceil(n / 32.0), (int)ceil(m / 32.0));

// x, y, out are presumably device pointers (Vector3f*, Vector3f*, float*)
// allocated/copied elsewhere -- TODO confirm. The launch is asynchronous.
tensor3dProdcutClassic<<<gridSize, blockSize>>>(n, m, x, y, out);

更新
我尝试修改代码以使用带和不带共享内存的单个数组,不带共享内存的代码总是快 3 或 4 倍。

共享内存:

#define BLOCK_SIZE 32
// Host wrapper for the tensor product c[i*m + j] = dot(a[i], b[j]).
// a: n points packed as 3 consecutive floats (x,y,z); b: m points likewise;
// c: host buffer for n*m floats, row-major. Synchronous: returns after the
// result has been copied back to the host (or after a failure, in which case
// c is left untouched).
// NOTE(review): the original checked no CUDA return codes at all and leaked
// device memory on partial failure; every call is now checked and all
// allocations are released on every path.
void tensor3dProdcut(const int n, const int m, const float* a, const float* b, float* c)
{
    const size_t sizeA = (size_t)n * 3 * sizeof(float);
    const size_t sizeB = (size_t)m * 3 * sizeof(float);
    const size_t sizeC = (size_t)n * (size_t)m * sizeof(float);

    float* d_a = nullptr;
    float* d_b = nullptr;
    float* d_c = nullptr;

    // Allocate and upload the inputs; short-circuit on the first failure.
    bool ok = cudaMalloc(&d_a, sizeA) == cudaSuccess
           && cudaMemcpy(d_a, a, sizeA, cudaMemcpyHostToDevice) == cudaSuccess
           && cudaMalloc(&d_b, sizeB) == cudaSuccess
           && cudaMemcpy(d_b, b, sizeB, cudaMemcpyHostToDevice) == cudaSuccess
           && cudaMalloc(&d_c, sizeC) == cudaSuccess;

    if (ok)
    {
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        // Ceil-division so partial edge blocks cover the tails of n and m.
        dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                     (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
        tensor3dProdcutKernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n, m);
        // Launches are asynchronous and return no status: pick up launch-
        // configuration errors here. The blocking memcpy below synchronizes
        // and surfaces any execution error.
        ok = cudaGetLastError() == cudaSuccess
          && cudaMemcpy(c, d_c, sizeC, cudaMemcpyDeviceToHost) == cudaSuccess;
        (void)ok;
    }

    // cudaFree(nullptr) is a documented no-op, so these are safe on every path.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}

// Shared-memory version: each 32x32 block stages its 32 a-points and 32
// b-points in shared memory, then every thread computes one dot product.
// a: n points packed as 3 floats; b: m points packed as 3 floats;
// c: n*m floats, row-major. Launch with BLOCK_SIZE x BLOCK_SIZE blocks.
__global__ void tensor3dProdcutKernel(float* a, float* b, float* c, int n, int m)
{
    const int blockRow = blockIdx.x;
    const int blockCol = blockIdx.y;
    const int row = threadIdx.x;
    const int col = threadIdx.y;
    const int gRow = BLOCK_SIZE * blockRow + row;  // global index into a
    const int gCol = BLOCK_SIZE * blockCol + col;  // global index into b
    const bool inRange = gRow < n && gCol < m;

    // float (not double, as the original had): the data is float, so double
    // staging doubled shared-memory use and traffic for no benefit, and made
    // the arithmetic differ from the non-shared version.
    __shared__ float as[BLOCK_SIZE][3];
    __shared__ float bs[BLOCK_SIZE][3];

    // BUG FIX: the original returned early for out-of-range threads *before*
    // __syncthreads(). A barrier must be reached by every thread of the
    // block, so edge blocks executed it from divergent code (undefined
    // behavior / possible hang). Guard the loads and the output instead, and
    // keep the barrier uniform.
    if (inRange)
    {
        // Threads sharing a row (or column) redundantly store the same
        // values; the same-value overlap is benign.
        for (int i = 0; i < 3; i++)
        {
            as[row][i] = a[gRow * 3 + i];
            bs[col][i] = b[gCol * 3 + i];
        }
    }
    __syncthreads();

    if (inRange)
    {
        float Cvalue = 0.0f;
        for (int i = 0; i < 3; i++)
            Cvalue += as[row][i] * bs[col][i];
        c[gRow * m + gCol] = Cvalue;
    }
}

没有共享内存:

// Direct (no shared memory) version: one thread per output element, each
// reading its two 3-float points straight from global memory.
// a: n points packed as 3 floats; b: m points packed as 3 floats;
// c: n*m floats, row-major. Launch with BLOCK_SIZE x BLOCK_SIZE blocks.
__global__ void tensor3dProdcutKernel(float* a, float* b, float* c, int n, int m)
{
    const int gRow = blockIdx.x * BLOCK_SIZE + threadIdx.x;  // index into a
    const int gCol = blockIdx.y * BLOCK_SIZE + threadIdx.y;  // index into b

    // Grid tail guard: block counts are rounded up, so trailing threads may
    // fall outside the n x m domain.
    if (gRow >= n || gCol >= m)
        return;

    const float* pa = a + gRow * 3;
    const float* pb = b + gCol * 3;

    float acc = 0.0f;
    for (int k = 0; k < 3; k++)
        acc += pa[k] * pb[k];

    c[gRow * m + gCol] = acc;
}

关于c++ - 使用自定义内核或 CUBLAS 对 vector 张量积进行 CUDA 优化,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/67663427/

相关文章:

caching - 如何避免 CUDA GPU 中的 TLB 未命中(以及高全局内存重放开销)?

cuda - 在GPU上什么更快?将 bool 类型转换为int或使用分支语句?

Python 和 gpu OpenCV 函数

c++ - 解决 Linux 库依赖关系?

c++ - 使用 ffmpeg.dll 或 avcodec.dll 将视频文件转换为 TIFF? "on-the-fly"可能吗?

c++ - 哪个函数在 cuda/nvml 库中返回 "nvmlDevice_t"类型变量?

synchronization - OpenCL 和 GPU 全局同步

c++ - 在 boost::heap::priority_queue 中推送结构对象时出错

c++ - C++03使用函数调用结果初始化多个成员?

c - 关于全局内存访问方式