c++ - Deadlock in MPI_Reduce() when running on multiple nodes

Tags: c++ parallel-processing mpi distributed-computing

I have a problem with my MPI code: it hangs when run on multiple nodes, but completes successfully when run on a single node. I'm not sure how to debug this. Can someone help me debug this issue?

The program is compiled and run with:

mpicc -o string strin.cpp
mpirun -np 4 -npernode 2 -hostfile hosts ./string 12 0.1 0.9 10 2

My code:

#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main ( int argc, char **argv )
{

    float *y, *yold;
    float *v, *vold;
    int nprocs, myid;
    FILE *f = NULL;
    MPI_Status   status;
    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];


    //  const int NUM_MASSES = 1000;
    //  const float Ktension = 0.1;
    //  const float Kdamping = 0.9;
    //  const float duration = 10.0;

#if 0
    if ( argc != 5 ) {
        std::cout << "usage: " << argv[0] << " NUM_MASSES durationInSecs Ktension Kdamping\n";
        return 2;
    }
#endif

    int NUM_MASSES  = atoi ( argv[1] );
    float duration = atof ( argv[2] );
    float Ktension = atof ( argv[3] );
    float Kdamping = atof ( argv[4] );
    const int PICKUP_POS = NUM_MASSES / 7;    // change this for diff harmonics
    const int OVERSAMPLING = 16;  // run sim at this multiple of audio sampling rate

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    MPI_Get_processor_name(processor_name, &namelen);

    // open output file
    if (myid  == 0) {
        f = fopen ( "rstring.raw", "wb" );
        if (!f) {
            std::cout << "can't open output file\n";
            return 1;
        }
    }

    // allocate displacement and velocity arrays
    y = new float[NUM_MASSES];
    yold = new float[NUM_MASSES];
    v = new float[NUM_MASSES];

    // initialize displacements (pluck it!) and velocities
    for (int i = 0; i < NUM_MASSES; i++ ) {
        v[i]  = 0.0f;
        yold[i] = y[i] = 0.0f;
        if (i == NUM_MASSES/2 )
            yold[i] = 1.0; // impulse at string center
    }

    // Broadcast data
    //MPI_Bcast(y, NUM_MASSES, MPI_FLOAT, 0, MPI_COMM_WORLD);
    //MPI_Bcast(yold, NUM_MASSES, MPI_FLOAT, 0, MPI_COMM_WORLD);
    //MPI_Bcast(v, NUM_MASSES, MPI_FLOAT, 0, MPI_COMM_WORLD);

    //int numIters = duration * 44100 * OVERSAMPLING; 
    int numIters = atoi( argv[5] );
    for ( int t = 0; t < numIters; t++ ) {
        // for each mass element
        float sum = 0;
        float gsum = 0;
        int i_start;
        int i_end ;

        i_start = myid * (NUM_MASSES/nprocs);
        i_end = i_start + (NUM_MASSES/nprocs);

        for ( int i = i_start; i < i_end; i++ ) {
            if ( i == 0 || i == NUM_MASSES-1 ) {
            } else {
                float accel = Ktension * (yold[i+1] + yold[i-1] - 2*yold[i]);
                v[i] += accel;
                v[i] *= Kdamping;
                y[i] = yold[i] + v[i];
                sum += y[i];
            }
        }

        MPI_Reduce(&sum, &gsum, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);

        float *tmp = y;
        y = yold;
        yold = tmp;


        if (myid == 0) {
            //printf("%f\n", gsum);
            if ( t % OVERSAMPLING == 0 ) {
                fwrite ( &gsum, sizeof(float), 1, f );
            }
        }
    }
    if (myid  == 0) {
        fclose ( f );
    }
    MPI_Finalize();
}

Best Answer

If possible, try running your application under a parallel debugger such as TotalView.

Otherwise, when the program hangs, you can attach a freely available serial debugger such as GDB to one process at a time to see where the potential problem might be.
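
For example, once the run hangs you could locate the PID of one of the MPI processes on a compute node and attach GDB to it to get a backtrace. A minimal sketch (the hostname node01 and the PID are placeholders for whatever your hostfile and ps actually report):

ssh node01                       # log in to one of the compute nodes from the hostfile
ps aux | grep ./string           # find the PID of the hung rank on this node
gdb -p <PID>                     # attach GDB to that process
(gdb) thread apply all bt        # show where it is blocked, e.g. inside MPI_Reduce
(gdb) detach                     # detach, then repeat for another rank/node

Comparing the backtraces of the different ranks usually shows which rank has diverged, for example one rank blocked in the collective while another is stuck somewhere else.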

Regarding "c++ - Deadlock in MPI_Reduce() when running on multiple nodes", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/12791591/
