Setting up an MPI parallel programming environment on Linux


MPI (Message Passing Interface) is a standard message-passing interface used for parallel computing. It has several implementations, such as MPICH, CHIMP, and OpenMPI; here we use MPICH.

I. Installing MPICH

Download: http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz

tar -xzvf soft/mpich-3.0.4.tar.gz
cd mpich-3.0.4/
./configure --prefix=/usr/local/mpich
make
make install

After installation, append the following to /etc/profile and run source /etc/profile:

PATH=$PATH:/usr/local/mpich/bin
MANPATH=$MANPATH:/usr/local/mpich/man
export PATH MANPATH


  • root@nami:~# echo $PATH — check that PATH now contains the new directory;
  • root@nami:~# ls /usr/local/mpich/bin — list the executables under bin;
  • root@nami:~# which mpiexec — confirm the command resolves to our install directory.

II. Single-node test

Copy the examples directory from the source tree into the install directory:

cp -r examples/ /usr/local/mpich

Then, from the install directory /usr/local/mpich, run:

mpirun -np 10 ./examples/cpi

The output looks like the following:

Process 0 of 10 is on server150
Process 9 of 10 is on server150
Process 1 of 10 is on server150
Process 4 of 10 is on server150
Process 5 of 10 is on server150
Process 7 of 10 is on server150
Process 2 of 10 is on server150
Process 3 of 10 is on server150
Process 6 of 10 is on server150
Process 8 of 10 is on server150

pi is approximately 3.1415926544231256, Error is 0.0000000008333325
wall clock time = 0.020644
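
For reference, cpi estimates pi by midpoint-rule integration of 4/(1+x^2) over [0,1]: rank 0 broadcasts the interval count, every rank sums its share of the intervals, and MPI_Reduce adds the partial sums on rank 0. A minimal sketch of the same idea (the interval count 10000 is an arbitrary choice):

#include "mpi.h"
#include <iostream>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int n = 10000;                              // number of intervals
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    double h = 1.0 / n, sum = 0.0;
    for (int i = rank + 1; i <= n; i += size) { // intervals are dealt out cyclically
        double x = h * (i - 0.5);               // midpoint of interval i
        sum += 4.0 / (1.0 + x * x);
    }
    double mypi = h * sum, pi = 0.0;
    MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        std::cout << "pi is approximately " << pi << std::endl;
    MPI_Finalize();
    return 0;
}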

III. Cluster configuration

1. Passwordless SSH login must be configured between the cluster machines. See the SSH (passwordless key-based login) section of "Hadoop-0.21.0 Linux distributed cluster configuration"; a typical setup is also sketched below.
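
If that article is not at hand, a key-based setup usually looks roughly like this (run as the user that will launch MPI jobs; the host name follows the examples below, and the step is repeated for every node):

ssh-keygen -t rsa          # generate a key pair; accept the defaults and an empty passphrase
ssh-copy-id root@server140 # install the public key on each remote node
ssh server140              # verify that login now works without a password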

2. Copy the compiled installation to the other machines:

scp -r mpich server140:/usr/local/
scp -r mpich server151:/usr/local/
scp -r mpich server130:/usr/local/
scp -r mpich server143:/usr/local/

Also add the corresponding environment variables on each machine, as in section I.

3. Create a servers file under /usr/local/mpich with the following contents (host:N assigns N process slots to that host):

server150:2 # run 2 processes
server140:2
server130:2
server143:2
server151:2

Run the following command, passing the servers file with -f:

mpiexec -n 10 -f servers ./examples/cpi

The output:

Process 0 of 10 is on server150
Process 1 of 10 is on server150
Process 4 of 10 is on server140
Process 5 of 10 is on server140
Process 6 of 10 is on server143
Process 7 of 10 is on server143
Process 8 of 10 is on server130
Process 9 of 10 is on server130
Process 2 of 10 is on server151
Process 3 of 10 is on server151
pi is approximately 3.1415926544231256, Error is 0.0000000008333325
wall clock time = 0.018768


Two demo programs (they build on Windows as well)

1. Hello world

#include "mpi.h"
#include <iostream>

int main(int argc, char* argv[])
{
    int rank;
    int size;
    MPI_Init(&argc, &argv);                  // hand the command line to MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    // this process's id within the communicator
    MPI_Comm_size(MPI_COMM_WORLD, &size);    // total number of processes

    std::cout << "Hello world from process " << rank
              << " of " << size << std::endl;

    MPI_Finalize();
    return 0;
}

Compile: mpicxx -o test_mpi test_mpi.cpp

Run: mpirun -n 2 ./test_mpi
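
With two processes, the program should print something like the following (the order of the lines is not deterministic):

Hello world from process 0 of 2
Hello world from process 1 of 2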


2. Computing the sum of a matrix
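
The program below reads the matrix from input.txt: the first line holds the row and column counts, followed by the matrix values. A sample file (the values are arbitrary; with them the expected total is 78):

3 4
1 2 3 4
5 6 7 8
9 10 11 12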

#include "mpi.h"

#include <iostream>
#include <fstream>

int main(int argc, char* argv[])
{
    using namespace std;
    int rank;
    int size;
    MPI_Status Status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int MAXX = 8;
    const int MAXPROCESSOR = 64;
    float Data[MAXX + 1][MAXX + 1]; // one extra row/column: the indices below are 1-based
    int ArraySize[2];
    int i, j, k;

    // Rank 0 reads the matrix dimensions and values from input.txt.
    if (rank == 0)
    {
        ifstream in("input.txt");
        in >> ArraySize[0] >> ArraySize[1];
        for (i = 1; i <= ArraySize[0]; i++)
        {
            for (j = 1; j <= ArraySize[1]; j++)
            {
                in >> Data[i][j];
            }
        }
    }

    // Every rank needs the dimensions to work out its share of the rows.
    MPI_Bcast(ArraySize, 2, MPI_INT, 0, MPI_COMM_WORLD);
    int AverageLineNumber, HeavyProcessorNumber, MyLineNumber;
    int CurrentLine, StartLine, SendSize;
    float SendArray[MAXX * MAXX]; // large enough even if one rank holds every row

    // Split the rows as evenly as possible: the first
    // HeavyProcessorNumber ranks each take one extra row.
    AverageLineNumber = ArraySize[0] / size;
    HeavyProcessorNumber = ArraySize[0] % size;
    if (rank < HeavyProcessorNumber)
        MyLineNumber = AverageLineNumber + 1;
    else
        MyLineNumber = AverageLineNumber;

    if (rank == 0)
    {
        // Walk the rows from the bottom up, sending each rank its block.
        // The last block (i == 0) is not sent: it stays in SendArray for rank 0.
        CurrentLine = ArraySize[0];
        for (i = size - 1; i >= 0; i--)
        {
            SendSize = 0;

            if (i < HeavyProcessorNumber)
                StartLine = CurrentLine - AverageLineNumber;
            else
                StartLine = CurrentLine - AverageLineNumber + 1;

            for (j = StartLine; j <= CurrentLine; j++)
                for (k = 1; k <= ArraySize[1]; k++)
                    SendArray[SendSize++] = Data[j][k];

            if (i != 0)
                MPI_Send(SendArray, SendSize,
                         MPI_FLOAT, i, 10, MPI_COMM_WORLD);
            CurrentLine = StartLine - 1;
        }
    }
    else
    {
        MPI_Recv(SendArray, MyLineNumber * ArraySize[1],
                 MPI_FLOAT, 0, 10, MPI_COMM_WORLD, &Status);
    }

    // Each rank sums its own rows, then rank 0 gathers the partial sums.
    float Sum = 0;
    for (i = 0; i < MyLineNumber * ArraySize[1]; i++)
        Sum += SendArray[i];
    float AllSum[MAXPROCESSOR];
    cout << "rank:" << rank << " cal sum is " << Sum << endl;
    MPI_Gather(&Sum, 1, MPI_FLOAT, AllSum, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        Sum = 0;
        for (i = 0; i < size; i++)
            Sum += AllSum[i];
        cout << "The Sum of the Array is:" << Sum << endl;
    }

    MPI_Finalize();
    return 0;
}

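It compiles and runs the same way as the first demo, e.g. mpicxx -o matrix_sum matrix_sum.cpp and mpirun -n 4 ./matrix_sum (the file name is just an example). For comparison, the same row distribution can be written far more compactly with collectives; a minimal sketch, assuming for simplicity a fixed 8x3 matrix whose row count divides evenly by the process count:

#include "mpi.h"
#include <iostream>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int ROWS = 8, COLS = 3;     // assumes ROWS % size == 0 (e.g. -n 1, 2, 4, 8)
    float data[ROWS * COLS];
    if (rank == 0)
        for (int i = 0; i < ROWS * COLS; i++)
            data[i] = 1.0f;           // placeholder values; read from a file in practice

    int chunk = (ROWS / size) * COLS; // elements handed to each process
    float local[ROWS * COLS];
    MPI_Scatter(data, chunk, MPI_FLOAT, local, chunk, MPI_FLOAT, 0, MPI_COMM_WORLD);

    float mysum = 0.0f, total = 0.0f;
    for (int i = 0; i < chunk; i++)   // sum the local block
        mysum += local[i];
    MPI_Reduce(&mysum, &total, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        std::cout << "The Sum of the Array is: " << total << std::endl;
    MPI_Finalize();
    return 0;
}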
