c++ - Improving memory layout for parallel computation

Tags: c++ parallel-processing gpgpu c++-amp

I am trying to optimize an algorithm (Lattice Boltzmann) for parallel computation using C++ AMP, and I am looking for suggestions on improving the memory layout. So far I have found that moving one member (the blocked flag) out of the struct into a separate vector gives a speedup of roughly 10%.

Does anyone have tips that could improve this further, or anything else I should consider? Below is the most time-consuming function, executed every time step, together with the struct used for the layout.

struct grid_cell {
//  int blocked;    // Define if blocked
    float n;        // North
    float ne;       // North-East
    float e;        // East
    float se;       // South-East
    float s;        // South
    float sw;       // South-West
    float w;        // West
    float nw;       // North-West
    float c;        // Center
};

int collision(const struct st_parameters param, vector<struct grid_cell> &node, vector<struct grid_cell> &tmp_node, vector<int> &obstacle) {
    int x,y;
    int i = 0;
    float c_sq = 1.0f/3.0f;     // Square of speed of sound
    float w0 = 4.0f/9.0f;       // Weighting factors
    float w1 = 1.0f/9.0f;
    float w2 = 1.0f/36.0f;

    int chunk = param.ny/20;
    float total_density = 0;

    float u_x,u_y;              // Average velocities in x and y direction
    float u[9];                 // Directional velocities
    float d_equ[9];             // Equilibrium densities
    float u_sq;                 // Squared velocity
    float local_density;        // Sum of densities in a particular node

    for(y=0;y<param.ny;y++) {
        for(x=0;x<param.nx;x++) {
            i = y*param.nx + x; // Node index
            // Don't consider blocked cells
            if (obstacle[i] == 0) {
                // Calculate local density
                local_density = 0.0f;
                local_density += tmp_node[i].n;
                local_density += tmp_node[i].e;
                local_density += tmp_node[i].s;
                local_density += tmp_node[i].w;
                local_density += tmp_node[i].ne;
                local_density += tmp_node[i].se;
                local_density += tmp_node[i].sw;
                local_density += tmp_node[i].nw;
                local_density += tmp_node[i].c;

                // Calculate x velocity component
                u_x = (tmp_node[i].e + tmp_node[i].ne + tmp_node[i].se - 
                      (tmp_node[i].w + tmp_node[i].nw + tmp_node[i].sw)) 
                      / local_density;
                // Calculate y velocity component
                u_y = (tmp_node[i].n + tmp_node[i].ne + tmp_node[i].nw - 
                      (tmp_node[i].s + tmp_node[i].sw + tmp_node[i].se)) 
                      / local_density;
                // Velocity squared
                u_sq = u_x*u_x +u_y*u_y;

                // Directional velocity components;
                u[1] =  u_x;        // East
                u[2] =        u_y;  // North
                u[3] = -u_x;        // West
                u[4] =      - u_y;  // South
                u[5] =  u_x + u_y;  // North-East
                u[6] = -u_x + u_y;  // North-West
                u[7] = -u_x - u_y;  // South-West
                u[8] =  u_x - u_y;  // South-East

                // Equilibrium densities
                // Zero velocity density: weight w0
                d_equ[0] = w0 * local_density * (1.0f - u_sq / (2.0f * c_sq));
                // Axis speeds: weight w1
                d_equ[1] = w1 * local_density * (1.0f + u[1] / c_sq
                                 + (u[1] * u[1]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[2] = w1 * local_density * (1.0f + u[2] / c_sq
                                 + (u[2] * u[2]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[3] = w1 * local_density * (1.0f + u[3] / c_sq
                                 + (u[3] * u[3]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[4] = w1 * local_density * (1.0f + u[4] / c_sq
                                 + (u[4] * u[4]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                // Diagonal speeds: weight w2
                d_equ[5] = w2 * local_density * (1.0f + u[5] / c_sq
                                 + (u[5] * u[5]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[6] = w2 * local_density * (1.0f + u[6] / c_sq
                                 + (u[6] * u[6]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[7] = w2 * local_density * (1.0f + u[7] / c_sq
                                 + (u[7] * u[7]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));
                d_equ[8] = w2 * local_density * (1.0f + u[8] / c_sq
                                 + (u[8] * u[8]) / (2.0f * c_sq * c_sq)
                                 - u_sq / (2.0f * c_sq));

                // Relaxation step
                node[i].c = (tmp_node[i].c + param.omega * (d_equ[0] - tmp_node[i].c));
                node[i].e = (tmp_node[i].e + param.omega * (d_equ[1] - tmp_node[i].e));
                node[i].n = (tmp_node[i].n + param.omega * (d_equ[2] - tmp_node[i].n));
                node[i].w = (tmp_node[i].w + param.omega * (d_equ[3] - tmp_node[i].w));
                node[i].s = (tmp_node[i].s + param.omega * (d_equ[4] - tmp_node[i].s));
                node[i].ne = (tmp_node[i].ne + param.omega * (d_equ[5] - tmp_node[i].ne));
                node[i].nw = (tmp_node[i].nw + param.omega * (d_equ[6] - tmp_node[i].nw));
                node[i].sw = (tmp_node[i].sw + param.omega * (d_equ[7] - tmp_node[i].sw));
                node[i].se = (tmp_node[i].se + param.omega * (d_equ[8] - tmp_node[i].se));
            }
        }
    }
    return 1;
}
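
For reference, a minimal sketch of how a per-cell update like this is typically dispatched with C++ AMP, so the layout question can be seen in the context of the GPU kernel. The function name collision_amp, the one-flat-array-per-direction split and the other identifiers are assumptions for illustration, not the questioner's actual code:

#include <amp.h>
#include <vector>
using namespace concurrency;

// Hypothetical dispatch wrapper: one collision update per lattice node on the
// accelerator. Only the centre value is shown; the other eight directions
// would get their own array_views in the same way.
void collision_amp(int nx, int ny, float omega,
                   const std::vector<float>& tmp_c,   // incoming centre values
                   std::vector<float>& node_c,        // updated centre values
                   const std::vector<int>& obstacle)
{
    array_view<const float, 1> tmp(nx * ny, tmp_c);
    array_view<float, 1>       out(nx * ny, node_c);
    array_view<const int, 1>   blocked(nx * ny, obstacle);
    out.discard_data();                       // results are write-only on the GPU

    parallel_for_each(out.extent, [=](index<1> idx) restrict(amp) {
        if (blocked[idx] == 0) {
            // ... local density, velocities and d_equ would be computed here ...
            float d_equ0 = tmp[idx];          // placeholder for the real d_equ[0]
            out[idx] = tmp[idx] + omega * (d_equ0 - tmp[idx]);
        }
    });
    out.synchronize();                        // copy the results back into node_c
}

array_view, parallel_for_each and restrict(amp) are the standard C++ AMP entry points; the lambda body would contain the same density/equilibrium arithmetic as the loop above. For a GPU the layout question is mainly whether adjacent threads (adjacent values of idx) read adjacent addresses: with one flat array per direction they do, while with a 36-byte grid_cell struct per cell the accesses to a given member are 36 bytes apart.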

Best Answer

In general, you should make sure that data used by different CPUs is not shared (that part is easy) and does not sit on the same cache line (false sharing; see the example here: False Sharing is No Fun). Data used by the same CPU should be kept close together so it benefits from the cache.
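
As a hedged sketch of both points applied to the code above (the names soa_grid and padded_float, and the 64-byte cache-line size, are assumptions for illustration, not part of the answer): storing the nine directions as separate contiguous arrays keeps the data one thread streams through close together, and padding any per-thread accumulator to a full cache line keeps threads from sharing a line.

#include <cstddef>
#include <vector>

// Structure-of-arrays layout: all values of one direction are contiguous, so
// iterating over e.g. only .n (or letting adjacent GPU threads read n[i],
// n[i+1], ...) touches consecutive addresses instead of striding over
// 36-byte grid_cell structs.
struct soa_grid {
    std::vector<float> n, ne, e, se, s, sw, w, nw, c;
    explicit soa_grid(std::size_t cells)
        : n(cells), ne(cells), e(cells), se(cells), s(cells),
          sw(cells), w(cells), nw(cells), c(cells) {}
};

// For per-thread partial results on the CPU side (e.g. a per-thread density
// sum), padding each slot to a cache line avoids false sharing between
// threads. 64 bytes is the usual x86 cache-line size (an assumption here).
struct alignas(64) padded_float {
    float value = 0.0f;
};

With this layout the relaxation step becomes, for example, node.c[i] = tmp.c[i] + param.omega * (d_equ[0] - tmp.c[i]); each such line then reads and writes one contiguous array.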

Regarding "c++ - Improving memory layout for parallel computation", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/9684747/
