c - MPI_Scatter and Gather - 2D array, uneven blocks

Tags: c arrays matrix mpi distributed-computing

I am working with MPI and I am trying to send uneven blocks of a 2D array to different processors. For example, if I have a non-square image of size 333x225, I want to send blocks of different sizes to different processors.

I have seen @Jonathan Dursi's method for evenly sized arrays: sending blocks of 2D array in C using MPI

I tried to adapt it to my problem. So far I have managed to send blocks of data to two processes like this:

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"

int malloc2dchar(char ***array, int n, int m) {

    /* allocate the n*m contiguous items */
    char *p = (char *)malloc(n*m*sizeof(char));
    if (!p) return -1;

    /* allocate the row pointers into the memory */
    (*array) = (char **)malloc(n*sizeof(char*));
    if (!(*array)) {
       free(p);
       return -1;
    }

    /* set up the pointers into the contiguous memory */
    for (int i=0; i<n; i++)
       (*array)[i] = &(p[i*m]);

    return 0;
}

int free2dchar(char ***array) {
    /* free the memory - the first element of the array is at the start */
    free(&((*array)[0][0]));

    /* free the pointers into the memory */
    free(*array);

    return 0;
}

int main(int argc, char **argv) {
    char **global, **local;
    const int gridsize=10; // size of grid
    const int procgridsize=2;  // size of process grid
    int rank, size;        // rank of current process and no. of processes

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        /* fill in the array, and print it */
        malloc2dchar(&global, gridsize, gridsize);
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                global[i][j] = '0'+(3*i+j)%10;
        }


        printf("Global array is:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                putchar(global[i][j]);

            printf("\n");
        }
    }

    /* create the local array which we'll process */

    malloc2dchar(&local, 5, 10);

    /* create a datatype to describe the subarrays of the global array */

    int sizes[2]    = {gridsize, gridsize};         /* global size */
    int subsizes[2] = {5, 10};     /* local size */
    int starts[2]   = {0,0};                        /* where this one starts */
    MPI_Datatype type, subarrtype;
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_CHAR, &type);
    MPI_Type_create_resized(type, 0, 10*sizeof(char), &subarrtype);
    MPI_Type_commit(&subarrtype);

    char *globalptr=NULL;
    if (rank == 0) globalptr = &(global[0][0]);

    /* scatter the array to all processors */
    int sendcounts[2];
    int displs[2];

    if (rank == 0) {
        for (int i=0; i<2; i++) sendcounts[i] = 1;
        int disp = 0;
        displs[0]=0;
        displs[1]=5;

        //for (int i=0; i<procgridsize; i++) {
        //    for (int j=0; j<procgridsize; j++) {
        //        displs[i*procgridsize+j] = disp;
        //        disp += 1;
        //    }
        //    disp += ((gridsize/procgridsize)-1)*procgridsize;
        //}
    }


    MPI_Scatterv(globalptr, sendcounts, displs, subarrtype, &(local[0][0]),
                 gridsize*gridsize/2, MPI_CHAR,
                 0, MPI_COMM_WORLD);

    /* now all processors print their local data: */

    for (int p=0; p<size; p++) {
        if (rank == p) {
            printf("Local process on rank %d is:\n", rank);
            for (int i=0; i<5; i++) {
                putchar('|');
                for (int j=0; j<10; j++) {
                    putchar(local[i][j]);
                }
                printf("|\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    /* now each processor has its local array, and can process it */
    for (int i=0; i<5; i++) {
        for (int j=0; j<10; j++) {
            local[i][j] = 'A' + rank;
        }
    }

    /* it all goes back to process 0 */
    MPI_Gatherv(&(local[0][0]), gridsize*gridsize/2,  MPI_CHAR,
                 globalptr, sendcounts, displs, subarrtype,
                 0, MPI_COMM_WORLD);

    /* don't need the local data anymore */
    free2dchar(&local);

    /* or the MPI data type */
    MPI_Type_free(&subarrtype);

    if (rank == 0) {
        printf("Processed grid:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++) {
                putchar(global[i][j]);
            }
            printf("\n");
        }

        free2dchar(&global);
    }


    MPI_Finalize();

    return 0;
}

So I get:

Global array is:
0123456789
3456789012
6789012345
9012345678
2345678901
5678901234
8901234567
1234567890
4567890123
7890123456

Local process on rank 0 is:
|0123456789|
|3456789012|
|6789012345|
|9012345678|
|2345678901|

Local process on rank 1 is:
|5678901234|
|8901234567|
|1234567890|
|4567890123|
|7890123456|

Processed grid:
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB

But I want the data to end up like this (i.e. blocks that are not even):

    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    BBBBBBBBBB
    BBBBBBBBBB
    BBBBBBBBBB
    BBBBBBBBBB

UPDATE

I tried to set tab_size based on the process rank, but it does not quite work.

The code looks like this:

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"

int malloc2dchar(char ***array, int n, int m) {

    /* allocate the n*m contiguous items */
    char *p = (char *)malloc(n*m*sizeof(char));
    if (!p) return -1;

    /* allocate the row pointers into the memory */
    (*array) = (char **)malloc(n*sizeof(char*));
    if (!(*array)) {
       free(p);
       return -1;
    }

    /* set up the pointers into the contiguous memory */
    for (int i=0; i<n; i++)
       (*array)[i] = &(p[i*m]);

    return 0;
}

int free2dchar(char ***array) {
    /* free the memory - the first element of the array is at the start */
    free(&((*array)[0][0]));

    /* free the pointers into the memory */
    free(*array);

    return 0;
}

int main(int argc, char **argv) {
    char **global, **local;
    const int gridsize=10; // size of grid
    const int procgridsize=2;  // size of process grid
    int rank, size;        // rank of current process and no. of processes

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);


    //if (size != procgridsize*procgridsize) {
    //    fprintf(stderr,"%s: Only works with np=%d for now\n", argv[0], procgridsize);
    //    MPI_Abort(MPI_COMM_WORLD,1);
    //}

    int tab_size;
    if (rank == 0) {
        /* fill in the array, and print it */
        malloc2dchar(&global, gridsize, gridsize);
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                global[i][j] = '0'+(3*i+j)%10;
        }


        printf("Global array is:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                putchar(global[i][j]);

            printf("\n");
        }
        tab_size = 4;
    }
    if(rank == 1)
    {
        tab_size = 6;
    }

    /* create the local array which we'll process */

    malloc2dchar(&local, tab_size, 10);

    /* create a datatype to describe the subarrays of the global array */

    int sizes[2]    = {gridsize, gridsize};         /* global size */
    int subsizes[2] = {tab_size, 10};     /* local size */
    int starts[2]   = {0,0};                        /* where this one starts */
    MPI_Datatype type, subarrtype;
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_CHAR, &type);
    MPI_Type_create_resized(type, 0, 10*sizeof(char), &subarrtype);
    MPI_Type_commit(&subarrtype);

    char *globalptr=NULL;
    if (rank == 0) globalptr = &(global[0][0]);

    /* scatter the array to all processors */
    int sendcounts[2];
    int displs[2];

    int tabsize;
    if (rank == 0) {
        for (int i=0; i<2; i++) sendcounts[i] = 1;
        int disp = 0;
        displs[0]=0;
        displs[1]=tab_size;

        //for (int i=0; i<procgridsize; i++) {
        //    for (int j=0; j<procgridsize; j++) {
        //        displs[i*procgridsize+j] = disp;
        //        disp += 1;
        //    }
        //    disp += ((gridsize/procgridsize)-1)*procgridsize;
        //}
    }


    MPI_Scatterv(globalptr, sendcounts, displs, subarrtype, &(local[0][0]),
                 gridsize*gridsize/2, MPI_CHAR,
                 0, MPI_COMM_WORLD);

    /* now all processors print their local data: */

    for (int p=0; p<size; p++) {
        if (rank == p) {
            printf("Local process on rank %d is:\n", rank);
            for (int i=0; i<tab_size; i++) {
                putchar('|');
                for (int j=0; j<10; j++) {
                    putchar(local[i][j]);
                }
                printf("|\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    /* now each processor has its local array, and can process it */
    for (int i=0; i<tab_size; i++) {
        for (int j=0; j<10; j++) {
            local[i][j] = 'A' + rank;
        }
    }

    /* it all goes back to process 0 */
    MPI_Gatherv(&(local[0][0]), gridsize*gridsize/2,  MPI_CHAR,
                 globalptr, sendcounts, displs, subarrtype,
                 0, MPI_COMM_WORLD);

    /* don't need the local data anymore */
    free2dchar(&local);

    /* or the MPI data type */
    MPI_Type_free(&subarrtype);

    if (rank == 0) {
        printf("Processed grid:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++) {
                putchar(global[i][j]);
            }
            printf("\n");
        }

        free2dchar(&global);
    }


    MPI_Finalize();

    return 0;
}

The output looks like this:

Global array is:
0123456789
3456789012
6789012345
9012345678
2345678901
5678901234
8901234567
1234567890
4567890123
7890123456
Local process on rank 0 is:
|0123456789|
|3456789012|
|6789012345|
|9012345678|
Local process on rank 1 is:
|2345678901|
|5678901234|
|8901234567|
|1234567890|
||
||
[blade001:3727] *** An error occurred in MPI_Gatherv
[blade001:3727] *** reported by process [2497249281,0]
[blade001:3727] *** on communicator MPI_COMM_WORLD
[blade001:3727] *** MPI_ERR_TRUNCATE: message truncated
[blade001:3727] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[blade001:3727] ***    and potentially your MPI job)

Accepted answer

Why your code is wrong

You define a datatype that has to be the same on every rank, but you define it differently on different ranks. That is not how this works. In your update, rank 0 builds subarrtype from tab_size = 4 while rank 1 uses tab_size = 6, yet both sides still pass a hard-coded count of gridsize*gridsize/2 elements, so the sent and received messages no longer match and MPI_Gatherv aborts with MPI_ERR_TRUNCATE.

How to do what you want correctly

Decomposing contiguous data by whole rows, as you describe, is much simpler. There is no need for complicated derived datatypes; in fact you barely need them at all. A very simple datatype representing one row is enough. The only remaining task is to set up the counts and displacements for MPI_Scatterv correctly:

int local_rows[2] = {6, 4};   /* number of rows owned by each rank */

malloc2dchar(&local, local_rows[rank], gridsize);

/* one full row of the grid as a single datatype */
MPI_Datatype row_type;
MPI_Type_contiguous(gridsize, MPI_CHAR, &row_type);
MPI_Type_commit(&row_type);

int displs[2];

if (rank == 0) {
  /* row offset of each rank's block within the global array */
  displs[0] = 0;
  for (int r = 1; r < 2; r++) {
    displs[r] = displs[r - 1] + local_rows[r - 1];
  }
}

MPI_Scatterv(globalptr, local_rows, displs, row_type, &(local[0][0]),
             local_rows[rank], row_type, 0, MPI_COMM_WORLD);

...

MPI_Gatherv(&(local[0][0]), local_rows[rank], row_type, globalptr, local_rows,
            displs, row_type, 0, MPI_COMM_WORLD);

This assumes that the intended sizes {6, 4} are known to all ranks. You can either have every rank compute them deterministically, or have only the root compute them and scatter them out (a non-root rank only needs to know its own number of rows), as sketched below.
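
A minimal sketch of that second option (this is my own addition, not part of the original answer; the names all_rows and my_rows are made up, while rank, size, gridsize and malloc2dchar come from the code above):

int *all_rows = NULL;   /* row counts per rank, only filled in on the root */
int my_rows = 0;        /* this rank's own row count */

if (rank == 0) {
    all_rows = (int *)malloc(size * sizeof(int));
    /* any deterministic uneven split works; hard-coding {6, 4} would do as well */
    int base = gridsize / size, rem = gridsize % size;
    for (int r = 0; r < size; r++)
        all_rows[r] = base + (r < rem ? 1 : 0);
}

/* each rank learns only its own number of rows */
MPI_Scatter(all_rows, 1, MPI_INT, &my_rows, 1, MPI_INT, 0, MPI_COMM_WORLD);

malloc2dchar(&local, my_rows, gridsize);

After this, my_rows replaces local_rows[rank] on the non-root ranks in the MPI_Scatterv/MPI_Gatherv calls, while the root keeps using all_rows for the counts and displacements.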

Truly irregular 2D decomposition

If you really want to split the data into blocks that do not consist of whole rows, it gets much more complicated. There is a very good answer that already covers this, so I will not repeat it here. Make sure to read it carefully and follow it closely.

Because of the complexity, I would only recommend doing this if you are absolutely sure you need it.

Overlap

You cannot send overlapping data with a single scatter. If you need overlap, consider exchanging the data directly between the neighbouring processes that own the adjacent ranges, in the form of a halo exchange, as roughly sketched below.
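
As a rough illustration of such a halo exchange for the row-wise split above (again my own sketch, not from the original answer; it assumes local was allocated with local_rows[rank] + 2 rows so that row 0 and the last row act as ghost rows):

int up   = (rank > 0)        ? rank - 1 : MPI_PROC_NULL;   /* neighbour owning the rows above */
int down = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;   /* neighbour owning the rows below */
int nrows = local_rows[rank];                              /* interior rows of this rank */

/* send my top interior row up, receive the lower neighbour's top row into my bottom ghost row */
MPI_Sendrecv(&(local[1][0]),         gridsize, MPI_CHAR, up,   0,
             &(local[nrows + 1][0]), gridsize, MPI_CHAR, down, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);

/* send my bottom interior row down, receive the upper neighbour's bottom row into my top ghost row */
MPI_Sendrecv(&(local[nrows][0]),     gridsize, MPI_CHAR, down, 1,
             &(local[0][0]),         gridsize, MPI_CHAR, up,   1,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);

MPI_PROC_NULL makes the calls at the top and bottom of the process grid no-ops, so no special-casing of the edge ranks is needed.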

The original question, "c - MPI_Scatter and Gather - 2D array, uneven blocks", can be found on Stack Overflow: https://stackoverflow.com/questions/43023845/
