c++ - CUDA kernel behaving erratically, producing random values

Tags: c++ cuda particle-system

I'm trying to simulate a spring-mass system using CUDA. Below is the kernel that updates the particle positions:

__global__ void timestep(float3 *pos, float3 *pos_antiga, float3 *aceleracao, int numParticulas) {

    int index = threadIdx.x + blockIdx.x * blockDim.x;

    // Extra threads beyond the particle count do nothing
    if(index >= numParticulas)
        return;

    // Save the current position; it becomes the previous position after the update
    float3 temp = pos[index];

    pos[index].x = pos[index].x + (pos[index].x - pos_antiga[index].x) * (1.0f - DAMPING) + aceleracao[index].x * TIMESTEP;
    pos[index].y = pos[index].y + (pos[index].y - pos_antiga[index].y) * (1.0f - DAMPING) + aceleracao[index].y * TIMESTEP;
    pos[index].z = pos[index].z + (pos[index].z - pos_antiga[index].z) * (1.0f - DAMPING) + aceleracao[index].z * TIMESTEP;

    pos_antiga[index] = temp;

}

pos holds the particles' current positions as 3D vectors, pos_antiga holds the positions from the previous timestep, DAMPING is 0.01, and TIMESTEP is 0.25. I'm using Verlet integration. In a system with no forces applied, aceleracao is zero, so pos and pos_antiga should be identical before and after the kernel call.
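
Written out per axis, the kernel above computes the damped Verlet step

    $x_{t+\Delta t} = x_t + (x_t - x_{t-\Delta t})(1 - \mathrm{DAMPING}) + a_x \cdot \mathrm{TIMESTEP}$

so with $a_x = 0$ and $x_t = x_{t-\Delta t}$ it reduces to $x_{t+\Delta t} = x_t$, which is why unchanged positions are the expected output here.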

However, after the first iteration CUDA returns crazy values for some of the axes, e.g. 1.#QNAN and -1.6241e+016. I thought this had to do with the block and thread sizes. The kernel call is as follows:

timestep<<<16, 16>>>(pos_d, pos_antiga_d, aceleracao_d, numParticulas);

So, what am I missing?

Edit: here is the calling code:

void timestepGPU(vector<Particula> *p) {

    // vector<Particula> has all the particles of the system.

    // CPU
    float *pos;
    float *pos_antiga;
    float *aceleracao;

    // GPU
    float *pos_d;
    float *pos_antiga_d;
    float *aceleracao_d;

    // Number of particles
    int numParticulas = p->size();

    // Init
    pos = new float[numParticulas * 3];
    pos_antiga = new float[numParticulas * 3];
    aceleracao = new float[numParticulas * 3];

    // Transferring the values from the class to a flat array
    vector<Particula>::iterator p_tmp;
    int i = 0;
    for(p_tmp = p->begin(); p_tmp != p->end(); p_tmp++)
    {
        pos[i] = (*p_tmp).getPos().f[0];
        pos[i + 1] = (*p_tmp).getPos().f[1];
        pos[i + 2] = (*p_tmp).getPos().f[2];

        pos_antiga[i] = (*p_tmp).getPosAntiga().f[0];
        pos_antiga[i + 1] = (*p_tmp).getPosAntiga().f[1];
        pos_antiga[i + 2] = (*p_tmp).getPosAntiga().f[2];

        aceleracao[i] = (*p_tmp).getAceleracao().f[0];
        aceleracao[i + 1] = (*p_tmp).getAceleracao().f[1];
        aceleracao[i + 2] = (*p_tmp).getAceleracao().f[2];

        i += 3;
    }

    // Here, I print the particle data BEFORE moving it to GPU
      cout << "PRINT PARTICLE DATA" << endl;
    for(i = 0; i < numParticulas * 3; i += 3) {
        cout << i/3 << " - Pos: " << pos[i] << " " << pos[i + 1] << " " << pos[i + 2] << " | Pos Ant: " << pos_antiga[i] << " " << pos_antiga[i + 1] << " " << pos_antiga[i + 2] << " | Acel: " << aceleracao[i] << " " << aceleracao[i + 1] << " " << aceleracao[i + 2] << endl;
    }
    cout << "END" << endl;

    // GPU
    ErroCUDA(cudaMalloc((void**) &pos_d, numParticulas * 3 * sizeof(float)));
    ErroCUDA(cudaMalloc((void**) &pos_antiga_d, numParticulas * 3 * sizeof(float)));
    ErroCUDA(cudaMalloc((void**) &aceleracao_d, numParticulas * 3 * sizeof(float)));

    // Moving data
    ErroCUDA(cudaMemcpy(pos_d, pos, numParticulas * 3 * sizeof(float), cudaMemcpyHostToDevice));
    ErroCUDA(cudaMemcpy(pos_antiga_d, pos_antiga, numParticulas * 3 * sizeof(float), cudaMemcpyHostToDevice));
    ErroCUDA(cudaMemcpy(aceleracao_d, aceleracao, numParticulas * sizeof(float), cudaMemcpyHostToDevice));

    // Setting number of blocks and threads per block
    unsigned int numThreads, numBlocos;
    calcularGrid(numParticulas, 64, numBlocos, numThreads);
    //cout << numBlocos << "----------" << numThreads << endl;

    // Kernel
    timestep<<<numBlocos, numThreads>>>((float3 *) pos_d, (float3 *) pos_antiga_d, (float3 *) aceleracao_d, numParticulas);
    ErroCUDA(cudaPeekAtLastError());
    ErroCUDA(cudaDeviceSynchronize());

    // Moving data back to the CPU
    ErroCUDA(cudaMemcpy(pos, pos_d, numParticulas * 3 * sizeof(float), cudaMemcpyDeviceToHost));
    ErroCUDA(cudaMemcpy(pos_antiga, pos_antiga_d, numParticulas * 3 * sizeof(float), cudaMemcpyDeviceToHost));

    // Printing the particles' data AFTER the kernel call. On my GT 4xx, around the 48th particle it starts to show crazy values
    cout << "PARTICLE DATA" << endl;
    for(i = 0; i < numParticulas * 3; i += 3) {
        cout << i/3 << " - Pos: " << pos[i] << " " << pos[i + 1] << " " << pos[i + 2] << " | Pos Ant: " << pos_antiga[i] << " " << pos_antiga[i + 1] << " " << pos_antiga[i + 2] << " | Acel: " << aceleracao[i] << " " << aceleracao[i + 1] << " " << aceleracao[i + 2] << endl;
    }
    cout << "END" << endl;

    system("pause");

    i = 0;
    for(p_tmp = p->begin(); p_tmp != p->end(); p_tmp++)
    {
        if((*p_tmp).getMovel())
        {
            (*p_tmp).setPos(Vetor(pos[i], pos[i + 1], pos[i + 2]));
            (*p_tmp).setPosAntiga(Vetor(pos_antiga[i], pos_antiga[i + 1], pos_antiga[i + 2]));
            (*p_tmp).setAceleracao(Vetor(0, 0, 0));
        }

        i += 3;
    }

    ErroCUDA(cudaFree(pos_d));
    ErroCUDA(cudaFree(pos_antiga_d));
    ErroCUDA(cudaFree(aceleracao_d));

    delete[] pos;         // allocated with new[], so delete[] (not free) is the matching deallocator
    delete[] pos_antiga;
    delete[] aceleracao;
}
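
ErroCUDA and calcularGrid are used above but not shown in the question. For reference, here are minimal sketches of what such helpers typically look like, with signatures inferred from the call sites (these are assumptions, not the asker's actual code):

    #include <cstdio>
    #include <cstdlib>
    #include <cuda_runtime.h>

    // Typical CUDA error-checking helper: print the error and abort (sketch only)
    inline void ErroCUDA(cudaError_t err) {
        if(err != cudaSuccess) {
            fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }

    // Plausible grid helper: cap the threads per block at blockSize and
    // round the block count up so every particle gets a thread
    inline void calcularGrid(int n, int blockSize, unsigned int &numBlocos, unsigned int &numThreads) {
        numThreads = (n < blockSize) ? (unsigned int) n : (unsigned int) blockSize;
        numBlocos = ((unsigned int) n + numThreads - 1) / numThreads;  // ceiling division
    }

With 100 particles and a block size of 64, this would give numThreads = 64 and numBlocos = 2, i.e. 128 threads in total, and the bounds check in the kernel keeps the 28 surplus threads from touching memory.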

In my example, the parameter p has 100 items (10 x 10 particles). It is a grid in 3D space starting at (0, 0, 0) and going to (20, 20, 20).

Thanks again for all your help!

Best Answer

I think your problem is in this line:

    ErroCUDA(cudaMemcpy(aceleracao_d, aceleracao, numParticulas * sizeof(float), cudaMemcpyHostToDevice));

It should be:

    ErroCUDA(cudaMemcpy(aceleracao_d, aceleracao, numParticulas * 3 * sizeof(float), cudaMemcpyHostToDevice));
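
With the original call, only numParticulas * sizeof(float) bytes, one third of the buffer, are copied, so two thirds of aceleracao_d is left as uninitialized device memory. The threads handling the later particles read garbage accelerations, and that garbage propagates into the position update as NaNs and huge values, which matches the corruption starting partway through the particle list. One way to rule out this class of size mismatch is to compute the byte count once and reuse it for every allocation and copy; a minimal sketch (the bytes variable is mine, not from the question):

    size_t bytes = numParticulas * 3 * sizeof(float);  // 3 floats per particle

    ErroCUDA(cudaMalloc((void**) &aceleracao_d, bytes));
    ErroCUDA(cudaMemcpy(aceleracao_d, aceleracao, bytes, cudaMemcpyHostToDevice));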

Regarding c++ - CUDA kernel behaving erratically, producing random values, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/19042224/
