c++ - Simple CUDA vector search/indexing program not working properly

Tags: c++ cuda

I seem to be having some problems with a CUDA program I am trying to write. The idea is to add two arrays of random numbers and then record the indices where the sum is above a certain value. I had the program working fine, but when I changed the threshold and recompiled, it behaved as if the value had not changed at all. I have a shell script for compiling that removes the old executable and the .o files. I am new to CUDA, so it may be something simple like a compilation issue, or perhaps I am not deallocating memory correctly. I free the arrays on both the device and the host, but I am not sure how to free the two floats I declare on the device. Any help would be appreciated, thanks in advance.

Specs: NVIDIA GT 220 on driver 340.93, CUDA 6.5

Here is kernel.cu:

#include <cuda_runtime.h> 
#include <stdio.h>

__device__ int dev_count = 0;

__device__ void my_push_back(int gg, float *index, int num)
{
int insert_pt = atomicAdd(&dev_count, 1);
  if (insert_pt < num)
  {
  index[insert_pt] = gg;
  }
}

__global__ void
vectorAdd(const float *A, const float *B, float *C, float *index, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float dev_value = 1.990;
float dev_value2 = 2.000;

  if (i < numElements)
  {
  C[i] = A[i] + B[i];
  float temp = C[i];
    if(temp > dev_value && temp < dev_value2)
    {
    my_push_back(i, index, numElements);

    }

  }
}


void kernel_wrap(float *h_A, float *h_B, float *h_C, float *h_Index, int numElements)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = numElements * sizeof(float);
// Print the vector length to be used, and compute its size


//now allocate memory on device GPU
    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    float *d_Index = NULL;
    err = cudaMalloc((void **)&d_Index, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_Index, numElements);
err = cudaGetLastError();

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

//Retrieve data from GPU memory

// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

    printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_Index, d_Index, size, cudaMemcpyDeviceToHost);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

//Free up memory on GPU

// Free device global memory
err = cudaFree(d_A);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

err = cudaFree(d_B);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

err = cudaFree(d_C);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

 err = cudaFree(d_Index);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}


// Free host memory



// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice.  It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

printf("Done\n");

Here is main.cpp:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <cstdlib>
#include <fstream>
#include <vector>
#include <algorithm>
#include <sstream>

extern void kernel_wrap(float *h_A, float *h_B, float *h_C, float *h_Index, int numElements);

int main(int argc, char *argv[]){

int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);

//allocate memory for vectors

// Allocate the host input vector A
float *h_AA = (float *)malloc(size);

// Allocate the host input vector B
float *h_BB = (float *)malloc(size);

// Allocate the host output vector C
float *h_CC = (float *)malloc(size);

float *h_Indexx = (float *)malloc(size);

// Verify that allocations succeeded
if (h_AA == NULL || h_BB == NULL || h_CC == NULL || h_Indexx == NULL)
{
    fprintf(stderr, "Failed to allocate host vectors!\n");
    exit(EXIT_FAILURE);
}

//create initial values for A and B

// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
    h_AA[i] = rand()/(float)RAND_MAX;
    h_BB[i] = rand()/(float)RAND_MAX;
}

    for (int i = 0; i < numElements; ++i)
{
    h_Indexx[i] = -1;
}



kernel_wrap(h_AA, h_BB, h_CC, h_Indexx, numElements);

for (int i = 0; i < numElements; ++i)
{

int temp = h_Indexx[i];
if(temp > 0 && temp <= numElements)
{
float tomp = h_AA[temp];
float tom = h_BB[temp];
float to = h_CC[temp];
std::cout << "\n@ i = "<< temp << " is  " << tomp << " + " << tom << " = " << to;
}
}
free(h_AA);
free(h_BB);
free(h_CC);
free(h_Indexx);




return 0;
}

Here is how I compile it:

rm main
rm *.o
g++ -c main.cpp
nvcc -arch=sm_11 -c kernel.cu 
nvcc -o main main.o kernel.o

First, sample output with dev_value @ 1.99:

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

Then with dev_value @ 1.98:

@ i = 5485 is 0.986223 + 0.995066 = 1.98129

@ i = 1348 is 0.999652 + 0.983039 = 1.98269

@ i = 6921 is 0.992085 + 0.992336 = 1.98442

@ i = 24666 is 0.993531 + 0.994337 = 1.98787

@ i = 27882 is 0.985079 + 0.998244 = 1.98332

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 46811 is 0.992908 + 0.993858 = 1.98677

@ i = 47066 is 0.991757 + 0.992284 = 1.98404

Then with dev_value back at 1.99:

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 1348 is 0.999652 + 0.983039 = 1.98269

@ i = 6921 is 0.992085 + 0.992336 = 1.98442

@ i = 24666 is 0.993531 + 0.994337 = 1.98787

@ i = 27882 is 0.985079 + 0.998244 = 1.98332

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 46811 is 0.992908 + 0.993858 = 1.98677

@ i = 47066 is 0.991757 + 0.992284 = 1.98404

Not sure what is going on here; recompiling does not fix it, and it usually starts working again if I change the variable name.

Best answer

As Robert Crovella spotted:

You never initialize the index array on the device to -1. You set it to -1 on the host side, but you never copy that data to the device. Add an additional host->device cudaMemcpy operation before the kernel launch that copies h_Index to d_Index, and I think your problem will be solved. And when I do that, all of the initcheck errors go away.

That is, I added:

err = cudaMemcpy(d_Index, h_Index, size, cudaMemcpyHostToDevice);
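
For context, that copy sits in kernel_wrap right after the copy of h_B and before the kernel launch. A minimal sketch with the same error-checking pattern the wrapper already uses:

// Copy the host-side -1 sentinels into the device index array before the
// kernel runs, so d_Index does not start out as uninitialized memory.
err = cudaMemcpy(d_Index, h_Index, size, cudaMemcpyHostToDevice);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector Index from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

With that copy in place, d_Index no longer begins life as uninitialized device memory, which is why the initcheck reports mentioned in the quote disappear (those presumably came from a run under cuda-memcheck --tool initcheck ./main).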

Regarding "c++ - Simple CUDA vector search/indexing program not working properly", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/33642607/
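
As a closing aside, an alternative that does not rely on the -1 sentinels at all is to read the dev_count symbol back after the kernel and only inspect that many recorded entries. A minimal sketch, not part of the posted code, assuming it is added inside kernel_wrap (where the dev_count symbol from kernel.cu is visible) after the device-to-host copies:

// Hypothetical read-back of the device-side hit counter: dev_count holds the
// number of hits recorded, and only the first min(h_count, numElements)
// slots of the index array were actually written by my_push_back.
int h_count = 0;
err = cudaMemcpyFromSymbol(&h_count, dev_count, sizeof(int));

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to read dev_count (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

printf("Recorded %d indices\n", h_count);

Note that dev_count is never reset in the posted code; if kernel_wrap were ever called more than once per process, it would also need to be zeroed (for example with cudaMemcpyToSymbol) before each launch.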
