c++ - Scatter an array to worker tasks only

Tags: c++ mpi scatter

I have an array of arrays (called sendbuff) that I want to send to the other processes using MPI::Scatter.

                        sendbuff
   #####    ###############################
p  # 0 # -> # -1 # -1 # -1 # -1 # -1 # -1 # (values)
o  #####    ###############################
s  # 1 # -> # -1 # -1 # -1 # -1 # -1 # -1 # (values)
   #####    ###############################

As you can see, sendbuff[0] holds an array of size 6 containing 6 values (all -1), and sendbuff[1] holds the same kind of thing. I want to send those arrays of -1s to the other processes and store them in an array called recvbuff, which is filled with zeros:

        recvbuff
#########################
# 0 # 0 # 0 # 0 # 0 # 0 #
#########################

I looked at answers to similar questions on SO and found a few, but they use MPI_Datatype and I would like to avoid that. To try to achieve this I wrote the following code, which does not work:

#include <iostream>
#include <cstdlib>
#include <mpi.h>

using namespace std;

int main( int argc, char *argv[]){

    //variable initialization
    int taskid, ntasks, buffsize, **sendbuff, *recvbuff;

    MPI::Init(argc, argv);

    taskid = MPI::COMM_WORLD.Get_rank();
    ntasks = MPI::COMM_WORLD.Get_size();    

    buffsize = 6;

    //memory initialization
    recvbuff = new int[buffsize];
    sendbuff = new int*[ntasks];
    for(int i = 0; i < ntasks; i++){
        sendbuff[i] = new int[buffsize];
    }

    //array initialization
    for(int i = 0; i < buffsize; i++){
        recvbuff[i] = 0;
    }
    for(int i = 0; i < ntasks; i++){
        for(int j = 0; j < buffsize; j++){
            sendbuff[i][j] = -1;
        }
    }

    //communication
    MPI::COMM_WORLD.Scatter(sendbuff[0], buffsize, MPI::INT, recvbuff, buffsize,
        MPI::INT, 0);

    //output
    for(int i = 0; i < buffsize; i++){
        cout<<"Task"<<taskid<<" recvbuff["<<i<<"] = "<<recvbuff[i] << endl;
    }

    //cleaning
    for(int i = 0; i < ntasks; i++){
            delete[] sendbuff[i];
    }
    delete[] sendbuff;
    delete[] recvbuff;


    MPI::Finalize();

    return EXIT_SUCCESS;
}

After the scatter, I expected the recvbuff arrays to be filled with -1 values, but instead I got a mix of -1s and garbage, like this:

$ mpirun -np 3 a.out 
Task0 recvbuff[0] = -1
Task0 recvbuff[1] = -1
Task0 recvbuff[2] = -1
Task0 recvbuff[3] = -1
Task0 recvbuff[4] = -1
Task0 recvbuff[5] = -1    
Task1 recvbuff[0] = 33
Task1 recvbuff[1] = 0
Task1 recvbuff[2] = -1
Task1 recvbuff[3] = -1
Task1 recvbuff[4] = -1
Task1 recvbuff[5] = -1
Task2 recvbuff[0] = -1
Task2 recvbuff[1] = -1
Task2 recvbuff[2] = 33
Task2 recvbuff[3] = 0
Task2 recvbuff[4] = 1768975727
Task2 recvbuff[5] = 7496543

What am I doing wrong? Thanks in advance, Pedro.

Best Answer

Scatter and gather are described in some detail in this answer. Scatter splits up the data and distributes the pieces to the other tasks, but the data has to sit in contiguous memory - MPI_Scatter has no way of knowing that it would need to follow pointers, or how many - and with the way you allocate sendbuff:

sendbuff = new int*[ntasks];
for(int i = 0; i < ntasks; i++){
    sendbuff[i] = new int[buffsize];
}

the different rows of sendbuff can end up scattered all over system memory. You're almost there if you allocate the data contiguously:

sendbuff = new int*[ntasks];
sendbuff[0] = new int[ntasks * 6];        // one contiguous block for all the rows
for(int i = 1; i < ntasks; i++){
    sendbuff[i] = &(sendbuff[0][i*6]);    // row pointers into that block
}
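
If the row pointers are only there for convenient indexing, you can also skip them entirely and index one flat buffer by hand. Below is a minimal sketch of that idea (not from the original answer), assuming the same layout of ntasks rows of buffsize ints and using placeholder values of -1:

#include <mpi.h>
#include <vector>

int main(int argc, char *argv[]){
    int taskid, ntasks;
    const int buffsize = 6;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    // one flat, contiguous buffer on the root; element j of the row
    // destined for rank r lives at index r*buffsize + j
    std::vector<int> sendbuff;
    if (taskid == 0)
        sendbuff.assign(ntasks * buffsize, -1);

    std::vector<int> recvbuff(buffsize, 0);

    // the send buffer argument is only read on the root
    MPI_Scatter(taskid == 0 ? sendbuff.data() : NULL, buffsize, MPI_INT,
                recvbuff.data(), buffsize, MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}

Either way, what matters is that the data handed to MPI_Scatter is one contiguous run of ntasks * buffsize ints.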

Now you should be able to do the scatter, but note that row 0 is going to go to rank 0; that is, the scatter goes to all processes in the communicator. If you only want to send to your non-zero-rank tasks, the simplest thing to do is to keep a dummy row of data for rank 0 in sendbuff so that the usual scatter works correctly:

#include <iostream>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size;
    const int nelem = 6;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int **sendbuff = new int*[size];
    int *recvbuff  = new int[nelem];

    if (rank == 0) {
        /* contiguous send buffer, allocated on the root only */
        sendbuff[0] = new int[nelem * size];
        for (int i=0; i<size; i++)
            sendbuff[i] = &(sendbuff[0][nelem*i]);

        /* row 0 holds dummy data for rank 0; rank r > 0 receives the value r-1 */
        for (int i=0; i<size; i++)
            for (int j=0; j<nelem; j++)
                sendbuff[i][j] = i-1;
    }

    MPI_Scatter(sendbuff[0], nelem, MPI_INT, recvbuff, nelem, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        std::cout << "Scatter: [ " << rank << "]: ";
        for (int i=0; i<nelem; i++)
            std::cout << recvbuff[i] << " ";
        std::cout << std::endl;

        for (int i=0; i<nelem; i++)
            recvbuff[i] *= recvbuff[i];
    }

    MPI_Gather(recvbuff, nelem, MPI_INT, sendbuff[0], nelem, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        for (int j=1; j<size; j++) {
            std::cout << "Gather: [ " << j << "]: ";
            for (int i=0; i<nelem; i++)
                    std::cout << sendbuff[j][i] << " ";
            std::cout << std::endl;
        }
    }

    delete [] recvbuff;
    if (rank == 0)
        delete [] sendbuff[0];
    delete [] sendbuff;

    MPI_Finalize();
}

Note that we scatter the data, the workers square the numbers, and the master gathers them back. Compiling and running gives:

$ mpic++ -o intercomm intercomm.cxx
$ mpirun -np 4 ./intercomm
Scatter: [ 2]: 1 1 1 1 1 1
Scatter: [ 1]: 0 0 0 0 0 0
Scatter: [ 3]: 2 2 2 2 2 2
Gather: [ 1]: 0 0 0 0 0 0
Gather: [ 2]: 1 1 1 1 1 1
Gather: [ 3]: 4 4 4 4 4 4

If you'd rather avoid the dummy data at rank 0 - which could be large - you can split the tasks into two groups, master and workers, and set up an intercommunicator that allows collective communication between them. Here is a simple program that does just that:

#include <iostream>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm   localComm;    /* intra-communicator of local sub-group */
    MPI_Comm   interComm;    /* inter-communicator */
    int masterworker;
    int rank, size;
    const int nelem = 6;
    int rootrank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* group 0 = the master (rank 0), group 1 = the workers */
    masterworker = (rank == 0 ? 0 : 1);
    MPI_Comm_split(MPI_COMM_WORLD, masterworker, rank, &localComm);

    if (masterworker == 0)
    {
        MPI_Intercomm_create( localComm, 0, MPI_COMM_WORLD, 1, 1, &interComm);
        /* in an intercommunicator collective, the sending group's root passes
           MPI_ROOT and the rest of that group passes MPI_PROC_NULL */
        rootrank = ( rank == 0 ? MPI_ROOT : MPI_PROC_NULL );
    }
    else {
        MPI_Intercomm_create( localComm, 0, MPI_COMM_WORLD, 0, 1, &interComm);
        /* the receiving group names the root by its rank in the remote group */
        rootrank = 0;
    }

    int **sendbuff = new int*[size-1];
    int *recvbuff  = new int[nelem];

    if (rank == 0) {

        sendbuff[0] = new int[nelem * (size-1)];
        for (int i=1; i<size-1; i++)
            sendbuff[i] = &(sendbuff[0][nelem*i]);

        for (int i=0; i<size-1; i++)
            for (int j=0; j<nelem; j++)
                sendbuff[i][j] = i;
    }

    MPI_Scatter(sendbuff[0], nelem, MPI_INT, recvbuff, nelem, MPI_INT, rootrank, interComm);

    if (masterworker == 1) {
        std::cout << "Scatter: [ " << rank << "]: ";
        for (int i=0; i<nelem; i++)
            std::cout << recvbuff[i] << " ";
        std::cout << std::endl;

        for (int i=0; i<nelem; i++)
            recvbuff[i] *= recvbuff[i];
    }

    MPI_Gather(recvbuff, nelem, MPI_INT, sendbuff[0], nelem, MPI_INT, rootrank, interComm);
    if (masterworker == 0) {
        for (int j=0; j<size-1; j++) {
            std::cout << "Gather: [ " << j << "]: ";
            for (int i=0; i<nelem; i++)
                    std::cout << sendbuff[j][i] << " ";
            std::cout << std::endl;
        }
    }

    MPI_Comm_free(&interComm);
    MPI_Comm_free(&localComm);
    delete [] recvbuff;
    if (rank == 0)
        delete [] sendbuff[0];
    delete [] sendbuff;

    MPI_Finalize();
}

Again, compiling and running gives:

$ mpic++ -o intercomm intercomm.cxx
$ mpirun -np 4 ./intercomm
Scatter: [ 1]: 0 0 0 0 0 0
Scatter: [ 2]: 1 1 1 1 1 1
Scatter: [ 3]: 2 2 2 2 2 2
Gather: [ 0]: 0 0 0 0 0 0
Gather: [ 1]: 1 1 1 1 1 1
Gather: [ 2]: 4 4 4 4 4 4
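
One thing that can help when debugging this kind of setup (not part of the original answer) is printing each side's local and remote group sizes; on an intercommunicator, MPI_Comm_size reports your own group and MPI_Comm_remote_size reports the other one. A short sketch with a hypothetical helper:

#include <iostream>
#include <mpi.h>

/* hypothetical helper: call after interComm has been created,
   e.g. right after MPI_Intercomm_create in the program above */
void report_intercomm(MPI_Comm interComm, int worldrank)
{
    int localsize, remotesize;
    MPI_Comm_size(interComm, &localsize);          /* size of my own group    */
    MPI_Comm_remote_size(interComm, &remotesize);  /* size of the other group */
    std::cout << "world rank " << worldrank << ": local group " << localsize
              << ", remote group " << remotesize << std::endl;
}

With -np 4, the master side would report a local group of 1 and a remote group of 3, and each worker the reverse.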


The original question, c++ - Scatter an array to worker tasks only, can be found on Stack Overflow: https://stackoverflow.com/questions/15589000/
