Cluster Lab Session 03
Group Communication
MPI_Bcast(void* buffer,
int count,
MPI_Datatype datatype,
int rootID,
MPI_Comm comm )
On entry (i.e., before the call), only the rootID processor contains the correct
value in buffer.
On exit (i.e., when the call returns), every processor has a copy of buffer.
Note that every processor in comm makes the same MPI_Bcast() call; the rootID
argument, not an if-test, tells MPI which processor is the sender.
Example 01:
#include <mpi.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
using namespace std;

int main(int argc, char* argv[])
{
   int  myid, numprocs, i;
   int  secret_num = 0;
   char buff[128];
   MPI_Status stat;
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
   MPI_Comm_rank(MPI_COMM_WORLD,&myid);
   // ------------------------------------------
   // Node 0 obtains the secret number
   // ------------------------------------------
   if ( myid == 0 )
   {
      secret_num = atoi(argv[1]);
   }
   // ------------------------------------------
   // Node 0 shares the secret with everybody
   // ------------------------------------------
   MPI_Bcast(&secret_num, 1, MPI_INT, 0, MPI_COMM_WORLD);
   if ( myid == 0 )
   {
      // Node 0 receives one message from every other processor
      for( i = 1; i < numprocs; i++)
      {
         MPI_Recv(buff, 128, MPI_CHAR, i, 0, MPI_COMM_WORLD, &stat);
         cout << buff << endl;
      }
   }
   else
   {
      // Every other processor reports the secret back to node 0
      sprintf(buff, "Processor %d knows the secret code: %d",
              myid, secret_num);
      MPI_Send(buff, 128, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
   }
   MPI_Finalize();
   return 0;
}
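To compile and run (assuming an Open MPI or MPICH installation; the file name,
binary name, and the secret value 42 are just for illustration):

mpic++ example01.cpp -o example01
mpirun -np 4 ./example01 42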
MPI_Scatter(void* sendbuf,
int sendcount,
MPI_Datatype sendtype,
void* recvbuf,
int recvcount,
MPI_Datatype recvtype,
int rootID,
MPI_Comm comm)
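Note that sendcount is the number of elements sent to each processor, not the
total, and that sendbuf, sendcount, and sendtype are only significant on the
rootID processor.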
Example 02:
Processor 0 distributes 2 integers to every processor.
Each processor adds its two numbers; returning the sums to processor 0 is done
with MPI_Gather in the next section.
#include <mpi.h>
#include <iostream>
using namespace std;

int main(int argc, char* argv[])
{
   int myid, numprocs;
   int i, k;
   int buff[128];      // send buffer, filled by node 0
   int recvbuff[2];    // every processor receives 2 integers
   int mysum;
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
   MPI_Comm_rank(MPI_COMM_WORLD,&myid);
   if ( myid == 0 )
   {
      cout << "We have " << numprocs << " processors" << endl;
      // -----------------------------------------------
      // Node 0 prepares 2 numbers for each processor:
      // [1][2] [3][4] [5][6] .... etc
      // -----------------------------------------------
      k = 1;
      for ( i = 0; i < 2*numprocs; i += 2 )
      {
         buff[i]   = k++;
         buff[i+1] = k++;
      }
   }
   // ------------------------------------------
   // Node 0 scatters the array: every processor
   // (node 0 included) receives 2 integers
   // ------------------------------------------
   MPI_Scatter(buff, 2, MPI_INT, recvbuff, 2, MPI_INT, 0, MPI_COMM_WORLD);
   // ------------------------------------------
   // Every processor adds its 2 numbers
   // ------------------------------------------
   mysum = recvbuff[0] + recvbuff[1];
   cout << "Processor " << myid << ": sum = " << mysum << endl;
   MPI_Finalize();
   return 0;
}
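With 4 processors the pairs are (1,2), (3,4), (5,6), (7,8), so a run might
print the following (the line order can vary, since every processor writes to
its own standard output):

We have 4 processors
Processor 0: sum = 3
Processor 1: sum = 7
Processor 2: sum = 11
Processor 3: sum = 15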
MPI_Gather(void* sendbuf,
int sendcount,
MPI_Datatype sendtype,
void* recvbuf,
int recvcount,
MPI_Datatype recvtype,
int rootID,
MPI_Comm comm)
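Note that recvbuf, recvcount, and recvtype are only significant on the rootID
processor, and that recvcount is the number of elements received from each
processor, not the total.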
#include <mpi.h>
#include <iostream>
using namespace std;

int main(int argc, char* argv[])
{
   int myid, numprocs;
   int i, k;
   int buff[128];      // send buffer on the way out, result buffer on the way back
   int recvbuff[2];    // every processor receives 2 integers
   int mysum;
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
   MPI_Comm_rank(MPI_COMM_WORLD,&myid);
   if ( myid == 0 )
   {
      cout << "We have " << numprocs << " processors" << endl;
      // -----------------------------------------------
      // Node 0 prepares 2 numbers for each processor:
      // [1][2] [3][4] [5][6] .... etc
      // -----------------------------------------------
      k = 1;
      for ( i = 0; i < 2*numprocs; i += 2 )
      {
         buff[i]   = k++;
         buff[i+1] = k++;
      }
   }
   // ------------------------------------------
   // Node 0 scatters the array to the processors
   // ------------------------------------------
   MPI_Scatter(buff, 2, MPI_INT, recvbuff, 2, MPI_INT, 0, MPI_COMM_WORLD);
   // ------------------------------------------
   // Every processor adds its 2 numbers
   // ------------------------------------------
   mysum = recvbuff[0] + recvbuff[1];
   // ------------------------------------------
   // Node 0 collects the results in "buff":
   // ------------------------------------------
   MPI_Gather(&mysum, 1, MPI_INT, buff, 1, MPI_INT, 0, MPI_COMM_WORLD);
// ------------------------------------------
// Node 0 prints result
// ------------------------------------------
if ( myid == 0 )
{
for( i = 0; i < numprocs; i++)
{
cout << "Processor " << i << ": sum = " << buff[i] << endl;
}
}
   MPI_Finalize();
   return 0;
}
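With 4 processors, node 0 now prints the four sums itself, in rank order:

We have 4 processors
Processor 0: sum = 3
Processor 1: sum = 7
Processor 2: sum = 11
Processor 3: sum = 15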
MPI provides a convenient on-the-fly gather-and-compute function that makes
this kind of programming easier: MPI_Reduce().
Syntax of the MPI_Reduce() call:
MPI_Reduce(void* sendbuf,
void* recvbuf,
int count,
MPI_Datatype datatype,
MPI_Op op,
int rootID,
MPI_Comm comm)
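The op argument selects a reduction operator such as MPI_SUM, MPI_PROD,
MPI_MAX, or MPI_MIN, applied element-wise across all processors' sendbuf; the
result lands in recvbuf on the rootID processor only. As a minimal sketch (not
part of the lab's pi example below), the following sums every processor's rank
on node 0:

#include <mpi.h>
#include <iostream>
using namespace std;

int main(int argc, char* argv[])
{
   int myid, total;
   MPI_Init(&argc,&argv);
   MPI_Comm_rank(MPI_COMM_WORLD,&myid);
   // Every processor contributes its rank; node 0 receives the sum
   MPI_Reduce(&myid, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
   if ( myid == 0 )
      cout << "Sum of all ranks = " << total << endl;
   MPI_Finalize();
   return 0;
}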
#include <mpi.h>
#include <iostream>
#include <cmath>
#include <cstdlib>
using namespace std;

// Integrand: the integral of 2.0/sqrt(1 - a*a) from a = 0 to a = 1 equals pi
double f(double a)
{
   return( 2.0 / sqrt(1.0 - a*a) );
}
/* =======================
   MAIN
   ======================= */
int main(int argc, char* argv[])
{
   int    myid, num_procs, i, N;
   double w, mypi, final_pi;
   MPI_Init(&argc,&argv);                        // Initialize
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);    // Get # processors
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);         // Get my rank (id)
   if ( myid == 0 )
      N = atoi(argv[1]);                         // # of intervals, from the command line
   // ------------------------------------------
   // Node 0 shares N with everybody
   // ------------------------------------------
   MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
   w = 1.0/(double) N;                           // width of one interval
   // ------------------------------------------
   // Each processor sums f() over its own share
   // of the sample points (midpoint rule)
   // ------------------------------------------
   mypi = 0.0;
   for ( i = myid; i < N; i += num_procs )
      mypi += w * f( w*(i + 0.5) );
   // ------------------------------------------
   // On-the-fly gather and compute: node 0
   // receives the sum of all the partial results
   // ------------------------------------------
   MPI_Reduce(&mypi, &final_pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   if ( myid == 0 )
   {
      cout << "Pi = " << final_pi << endl << endl;
   }
   MPI_Finalize();
   return 0;
}
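To compile and run (the file and binary names are just placeholders; mpic++ and
mpirun come with Open MPI or MPICH):

mpic++ pi.cpp -o pi
mpirun -np 4 ./pi 1000000

The integrand grows steeply near a = 1, so a large N is needed for the midpoint
sum to approximate pi accurately.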