MPI用MPI_Send, MPI_Recv实现MPI_Alltoall的块方法

来源:互联网 发布:获取淘宝买家手机号 编辑:程序博客网 时间:2024/06/03 19:36
用了一个多小时,终于搞定用MPI_Send, MPI_Recv实现MPI_Alltoall,网上类似的源码都是转置单个元素,想按块转置真不容易。首先科普一下什么是MPI_Alltoall:MPI的n个进程,每个进程有n个数据块;如果每块只包含一个元素,则这些块构成一个n*n的矩阵。MPI_Alltoall要做的就是转置这个矩阵,即把第i个进程的第j块发送到第j个进程的第i块。废话不多说,上源码。
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>

/*
 * My_Alltoall - block-wise all-to-all exchange built from point-to-point calls.
 *
 * Block j of sendBuffer on rank i (sendcnt elements of sendtype) ends up as
 * block i of receiveBuffer on rank j, i.e. the same contract as MPI_Alltoall.
 *
 * Parameters:
 *   sendBuffer/receiveBuffer - size*sendcnt (resp. size*recvcnt) elements
 *   sendcnt/recvcnt          - elements per block
 *   sendtype/recvtype        - MPI datatypes of the elements
 *   comm                     - communicator
 *   rank, size               - caller's rank and the communicator size
 *
 * Returns 1 on completion (kept for interface compatibility).
 *
 * Fixes vs. the original version:
 *  - Byte offsets were computed with sizeof(sendtype), which is the size of
 *    the MPI_Datatype HANDLE (an int on MPICH, a pointer on Open MPI), not
 *    the size of a data element.  We query the real element size with
 *    MPI_Type_size.  (Works for contiguous types such as MPI_INT; derived
 *    types with holes would need MPI_Type_get_extent.)
 *  - Arithmetic on void* is a GNU extension and undefined in ISO C; the
 *    buffers are addressed through char* instead.
 *  - Posting every MPI_Send before any MPI_Recv deadlocks once messages
 *    exceed the eager threshold.  One MPI_Sendrecv per partner (including
 *    self) is deadlock-free for any message size.
 */
int My_Alltoall(void *sendBuffer, int sendcnt, MPI_Datatype sendtype,
                void *receiveBuffer, int recvcnt, MPI_Datatype recvtype,
                MPI_Comm comm, int rank, int size)
{
    char *sbuf = (char *)sendBuffer;
    char *rbuf = (char *)receiveBuffer;
    int ssize, rsize;       /* element sizes in bytes */
    int partner;
    MPI_Status status;

    (void)rank;             /* kept in the signature for compatibility */

    MPI_Type_size(sendtype, &ssize);
    MPI_Type_size(recvtype, &rsize);

    /* Exchange one block with every partner.  MPI_Sendrecv progresses its
     * send and receive independently, so this pairwise loop cannot deadlock. */
    for (partner = 0; partner < size; partner++) {
        MPI_Sendrecv(sbuf + (size_t)partner * sendcnt * ssize,
                     sendcnt, sendtype, partner, 99,
                     rbuf + (size_t)partner * recvcnt * rsize,
                     recvcnt, recvtype, partner, 99,
                     comm, &status);
    }
    return 1;
}

/* Demo driver: each rank sends 2 ints to every other rank and prints both
 * buffers afterwards. */
int main(int argc, char *argv[])
{
    int i, myrank, size, nelems;
    int *send_buffer, *recv_buffer;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* 2 ints per destination rank.  (The original overwrote `size` with
     * size*2 and then passed size/2 back in — a separate variable is far
     * less error-prone.) */
    nelems = size * 2;
    send_buffer = calloc((size_t)nelems, sizeof *send_buffer);
    recv_buffer = calloc((size_t)nelems, sizeof *recv_buffer);
    if (send_buffer == NULL || recv_buffer == NULL) {
        fprintf(stderr, "rank %d: out of memory\n", myrank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    for (i = 0; i < nelems; i++)
        send_buffer[i] = i + myrank * nelems;

    My_Alltoall(send_buffer, 2, MPI_INT,
                recv_buffer, 2, MPI_INT, MPI_COMM_WORLD, myrank, size);

    for (i = 0; i < nelems; i++)
        printf("myrank=%d,sendbuf[%d]=%d\n", myrank, i, send_buffer[i]);
    for (i = 0; i < nelems; i++)
        printf("myrank = %d, recv_buffer[%d] = %d\n", myrank, i, recv_buffer[i]);

    free(recv_buffer);
    free(send_buffer);
    MPI_Finalize();
    return 0;
}


0 0
原创粉丝点击