我在使用 MPI 发送和(或)接收动态数组时遇到了很大的问题。下面只是我的代码的一部分,但足以看出我做错了什么。请帮助我,我整晚都在寻找解决方案。 我得到这样的东西: * 检测到 glibc ./mv2.out:munmap_chunk():无效指针:0x0000000000da2a70 检测到 glibc ./mv2.out: malloc(): 内存损坏(快速):0x0000000000da2a50 *
当我用静态数组替换动态数组时,一切正常。
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2
double **alloc_2d_array(int rows, int cols) {
int i;
double *data = (double *)malloc(rows*cols*sizeof(double));
double **array= (double **)malloc(rows*sizeof(double*));
for (i=0; i<rows; i++)
array[i] = &(data[cols*i]);
return array;
}
int main (int argc, char *argv[])
{
int degree,
numtasks,
taskid,
numworkers,
source,
dest,
mtype,
rows,
offset,
averow,
extra,
i, j, k, rc;
MPI_Status status;
double **a, *b, *c;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
if (numtasks < 2 )
{
printf("Aby rozpoczac obliczenia rownolegle potrzeba co najmniej 2 procesow.\n");
MPI_Abort(MPI_COMM_WORLD, rc);
exit(1);
}
numworkers = numtasks-1;
if (taskid == MASTER)
{
printf("Podaj stopien macierzy: \n");
scanf ("%d", °ree);
printf("Obecnie dostepnych jest %d procesow do dyspozycji mastera.\n", numtasks);
FILE *file;
file = fopen("matrix.txt", "r");
if(file == NULL)
{
printf("Nie mozna otworzyc pliku!\n");
MPI_Finalize();
exit(0);
}
a = alloc_2d_array(degree, degree);
b = (double*) malloc(sizeof(double) * degree);
c = (double*) malloc(sizeof(double) * degree);
printf("Tworzenie macierzy z pliku\n");
for(i = 0; i < degree; i++)
for(j = 0; j < degree; j++)
fscanf(file, "%lf", &a[i][j]);
for(i = 0; i < degree; i++)
{
for(j = 0; j < degree; j++)
{
printf("%f", a[i][j]);
}
printf("\n");
}
printf("Tworzenie wektora z pliku\n");
for(i = 0; i < degree; i++)
fscanf(file, "%lf", &b[i]);
for(i = 0; i < degree; i++)
{
printf("%f\n", b[i]);
}
fclose(file);
averow = degree / numworkers;
extra = degree % numworkers;
offset = 0;
mtype = FROM_MASTER;
for (dest = 1; dest <= numworkers; dest++)
{
rows = (dest <= extra) ? (averow + 1) : averow;
printf("Wysylanie %d wierszy do procesu nr %d, z offset'em = %d\n", rows, dest, offset);
MPI_Send(°ree, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&a[offset][0], rows * degree, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&b, degree, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
offset = offset + rows;
}
mtype = FROM_WORKER;
for (i=1; i<=numworkers; i++)
{
source = i;
MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&c[offset], rows, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
printf("Otrzymalem wyniki od procesu nr %d\n", source);
}
printf("***\n");
printf("Wektor wynikowy:\n");
for (i = 0; i < degree; i++)
{
printf("\n%6.2f", c[i]);
}
printf("\n***\n");
printf ("KONIEC\n");
}
if (taskid > MASTER)
{
b = (double*) malloc(sizeof(double) * degree);
c = (double*) malloc(sizeof(double) * degree);
mtype = FROM_MASTER;
MPI_Recv(°ree, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&a, rows * degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status); //HERE IS THE PROBLEM
MPI_Recv(&b, degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
for (i = 0; i < rows; i++)
{
c[i] = 0.0;
for (j = 0; j < degree; j++)
c[i] += a[i][j] * b[j];
}
mtype = FROM_WORKER;
MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
MPI_Send(&c, rows, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
}
MPI_Finalize();
}
重现错误的最短版本:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2
double **alloc_2d_array(int rows, int cols) {
int i;
double *data = (double *)malloc(rows*cols*sizeof(double));
double **array= (double **)malloc(rows*sizeof(double*));
for (i=0; i<rows; i++)
array[i] = &(data[cols*i]);
return array;
}
int main (int argc, char *argv[])
{
int degree,
taskid,
source,
dest,
mtype,
rows,
offset,
i, j;
MPI_Status status;
double **a, *b, *c;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
if (taskid == MASTER)
{
FILE *file;
file = fopen("matrix.txt", "r");
a = alloc_2d_array(degree, degree);
b = (double*) malloc(sizeof(double) * degree);
c = (double*) malloc(sizeof(double) * degree);
for(i = 0; i < degree; i++)
for(j = 0; j < degree; j++)
fscanf(file, "%lf", &a[i][j]);
for(i = 0; i < degree; i++)
fscanf(file, "%lf", &b[i]);
fclose(file);
offset = 0;
mtype = FROM_MASTER;
MPI_Send(°ree, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&a[offset][0], rows * degree, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&b, degree, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
}
}
if (taskid > MASTER)
{
a = alloc_2d_array(degree, degree);
b = (double*) malloc(sizeof(double) * degree);
c = (double*) malloc(sizeof(double) * degree);
mtype = FROM_MASTER;
MPI_Recv(°ree, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
// v HERE IS THE PROBLEM v
MPI_Recv(&a, rows * degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&b, degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
}
MPI_Finalize();
}
编辑:没有从 worker 发送到 master 的版本工作正常,但是当我添加响应应用程序时暂停。 在下面的链接中,您可以找到静态数组的示例。当我用动态的替换它们时,应用程序无法正常工作。 https://computing.llnl.gov/tutorials/mpi/samples/C/mpi_mm.c
最佳答案
在这里必须有点通灵,因为您还没有发布可以运行和测试的完整程序(请在以后这样做,例如在这里您没有包含所有需要的#includes 和#defines,并且你也不提供输入文件)。但是据我所知,您在 recv 中的问题有两个方面。首先,您还没有为“工作”进程分配任何内存。其次,您为 a 的 recv 指定的起始地址不正确。这是您的代码的缩减版本,它可以满足您的需求(但我还是在猜测)。另外请注意 C 是我的第二语言,所以请检查我做了什么。不管怎样,它在这里:
ian@ian-pc:~/test$ cat mpi.c
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define FROM_MASTER 1
#define FROM_WORKER 2
#define MASTER 0
double **alloc_2d_array(int rows, int cols) {
int i;
double *data = (double *)malloc(rows*cols*sizeof(double));
double **array= (double **)malloc(rows*sizeof(double*));
for (i=0; i<rows; i++)
array[i] = &(data[cols*i]);
return array;
}
int main (int argc, char *argv[])
{
int degree, numtasks, taskid, numworkers, dest, mtype, rows, offset, averow, extra, i, j;
MPI_Status status;
double **a;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
if (numtasks < 2 )
{
printf("Aby rozpoczac obliczenia rownolegle potrzeba co najmniej 2 procesow.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
exit(1);
}
numworkers = numtasks-1;
if (taskid == MASTER)
{
printf("Podaj stopien macierzy: \n");
scanf ("%d", °ree);
printf("Obecnie dostepnych jest %d procesow do dyspozycji mastera.\n", numtasks);
a = alloc_2d_array(degree, degree);
printf("Tworzenie macierzy z pliku\n");
for(i = 0; i < degree; i++)
for(j = 0; j < degree; j++)
a[ i ][ j ] = i + 10 * j;
printf( "Initial\n" );
for(i = 0; i < degree; i++) {
for(j = 0; j < degree; j++)
printf("%f ", a[i][j]);
printf("\n");
}
averow = degree / numworkers;
extra = degree % numworkers;
offset = 0;
mtype = FROM_MASTER;
for (dest = 1; dest <= numworkers; dest++)
{
rows = (dest <= extra) ? (averow + 1) : averow;
MPI_Send(°ree, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
MPI_Send(&(a[offset][0]), rows * degree, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
offset = offset + rows;
}
}
/* Attempt to order output, not gauranteed to work */
MPI_Barrier( MPI_COMM_WORLD );
if (taskid > MASTER)
{
mtype = FROM_MASTER;
MPI_Recv(°ree, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
a = alloc_2d_array( rows, degree);
/* MPI_Recv(&a, rows * degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status); */
MPI_Recv(&(a[0][0]), rows * degree, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
printf( "Final from %d\n", taskid );
for(i = 0; i < rows; i++) {
printf( "%d ", taskid );
for(j = 0; j < degree; j++)
printf("%f ", a[i][j]);
printf("\n");
}
}
MPI_Finalize();
return EXIT_SUCCESS;
}
ian@ian-pc:~/test$ mpicc -std=c89 -Wall -Wextra -pedantic -O mpi.c
In file included from mpi.c:5:0:
/usr/lib/openmpi/include/mpi.h:220:9: warning: ISO C90 does not support ‘long long’ [-Wlong-long]
mpi.c: In function ‘main’:
mpi.c:45:13: warning: ignoring return value of ‘scanf’, declared with attribute warn_unused_result [-Wunused-result]
ian@ian-pc:~/test$ mpirun -np 3 ./a.out
Podaj stopien macierzy:
2
Obecnie dostepnych jest 3 procesow do dyspozycji mastera.
Tworzenie macierzy z pliku
Initial
0.000000 10.000000
1.000000 11.000000
Final from 1
1 0.000000 10.000000
Final from 2
2 1.000000 11.000000
ian@ian-pc:~/test$ mpirun -np 3 ./a.out
Podaj stopien macierzy:
4
Final from 1
1 0.000000 10.000000 20.000000 30.000000
1 1.000000 11.000000 21.000000 31.000000
Final from 2
2 2.000000 12.000000 22.000000 32.000000
2 3.000000 13.000000 23.000000 33.000000
Obecnie dostepnych jest 3 procesow do dyspozycji mastera.
Tworzenie macierzy z pliku
Initial
0.000000 10.000000 20.000000 30.000000
1.000000 11.000000 21.000000 31.000000
2.000000 12.000000 22.000000 32.000000
3.000000 13.000000 23.000000 33.000000
ian@ian-pc:~/test$ mpirun -np 3 ./a.out
Podaj stopien macierzy:
5
Final from 2
2 3.000000 13.000000 23.000000 33.000000 43.000000
2 4.000000 14.000000 24.000000 34.000000 44.000000
Obecnie dostepnych jest 3 procesow do dyspozycji mastera.
Tworzenie macierzy z pliku
Initial
0.000000 10.000000 20.000000 30.000000 40.000000
1.000000 11.000000 21.000000 31.000000 41.000000
2.000000 12.000000 22.000000 32.000000 42.000000
3.000000 13.000000 23.000000 33.000000 43.000000
4.000000 14.000000 24.000000 34.000000 44.000000
Final from 1
1 0.000000 10.000000 20.000000 30.000000 40.000000
1 1.000000 11.000000 21.000000 31.000000 41.000000
1 2.000000 12.000000 22.000000 32.000000 42.000000
ian@ian-pc:~/test$
但是也了解 MPI_Bcast,它在这里很有用......
关于c - 在 MPI 中发送和接收动态数组,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/24950636/