0% found this document useful (0 votes)
53 views

1. Hello World Program in MPI

The document contains code snippets demonstrating the use of MPI (Message Passing Interface) for parallel programming. Specifically, it shows: 1. A basic "Hello World" MPI program to initialize the environment and print process information. 2. Code to distribute an array across processes, have each process calculate a partial sum, and collect the results to find the total sum. 3. Examples of broadcasting a message to all processes using MPI_Bcast and MPI_Ibcast, as well as a custom broadcast function. 4. Code using MPI_Gather to collect arrays distributed across processes. 5. Examples of using MPI_Scatter, MPI_Gather, and MPI_Reduce to distribute data

Uploaded by

Aditya Shetti
Copyright
© © All Rights Reserved
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
53 views

1. Hello World Program in MPI

The document contains code snippets demonstrating the use of MPI (Message Passing Interface) for parallel programming. Specifically, it shows: 1. A basic "Hello World" MPI program to initialize the environment and print process information. 2. Code to distribute an array across processes, have each process calculate a partial sum, and collect the results to find the total sum. 3. Examples of broadcasting a message to all processes using MPI_Bcast and MPI_Ibcast, as well as a custom broadcast function. 4. Code using MPI_Gather to collect arrays distributed across processes. 5. Examples of using MPI_Scatter, MPI_Gather, and MPI_Reduce to distribute data

Uploaded by

Aditya Shetti
Copyright
© © All Rights Reserved
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 11

1.

Hello World program in MPI


#include <mpi.h>
#include <stdio.h>

/*
 * MPI "Hello World": each process reports its rank, the total process
 * count, and the processor (host) name it is running on.
 */
int main(int argc, char** argv) {
    /* Initialize the MPI environment. Pass argc/argv through (the
     * original passed NULL, NULL, discarding any MPI runtime options). */
    MPI_Init(&argc, &argv);

    /* Number of processes in the global communicator. */
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* Rank (id) of this process within MPI_COMM_WORLD. */
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    /* Name of the processor/host this rank runs on. */
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    printf("Hello world from processor %s, rank %d out of %d processors\n",
           processor_name, world_rank, world_size);

    /* Finalize: no MPI calls are allowed after this point. */
    MPI_Finalize();
    return 0;
}

2. Basic sum of 10 array elements using MPI_Send / MPI_Recv


#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// size of the global input array
#define n 10

// Input data: the full array lives on the master (rank 0) process only.
int a[] = { 10, 21, 32, 43, 54, 65, 76, 87, 98, 109 };

// Receive buffer used by each slave process for its array segment.
// Sized generously (1000) so any slice of a[] fits.
int a2[1000];

/*
 * Parallel array sum: rank 0 slices the global array a[] across the
 * other ranks, every rank computes a partial sum, and rank 0 collects
 * the partial sums and prints the total.
 *
 * Fixes over the original: removes the unused `sender` local, passes
 * the array itself (not `&a2`) to MPI_Recv, and corrects the spelling
 * of the received-count variable.
 */
int main(int argc, char* argv[])
{
    int pid, np;
    int elements_per_process, n_elements_received;
    // np  -> number of processes
    // pid -> rank (process id) of this process
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);

    if (pid == 0) {
        /* ---------------- master process ---------------- */
        int index, i;
        elements_per_process = n / np;

        if (np > 1) {
            /* Send an equal slice to ranks 1 .. np-2. */
            for (i = 1; i < np - 1; i++) {
                index = i * elements_per_process;
                MPI_Send(&elements_per_process, 1, MPI_INT, i, 0,
                         MPI_COMM_WORLD);
                MPI_Send(&a[index], elements_per_process, MPI_INT, i, 0,
                         MPI_COMM_WORLD);
            }
            /* The last rank takes whatever remains, covering n % np. */
            index = i * elements_per_process;
            int elements_left = n - index;
            MPI_Send(&elements_left, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
            MPI_Send(&a[index], elements_left, MPI_INT, i, 0,
                     MPI_COMM_WORLD);
        }

        /* Master sums its own leading slice (the whole array if np==1). */
        int sum = 0;
        for (i = 0; i < elements_per_process; i++)
            sum += a[i];

        /* Collect one partial sum per other rank, in arrival order. */
        int tmp;
        for (i = 1; i < np; i++) {
            MPI_Recv(&tmp, 1, MPI_INT, MPI_ANY_SOURCE, 0,
                     MPI_COMM_WORLD, &status);
            sum += tmp;
        }

        printf("Sum of array is : %d\n", sum);
    } else {
        /* ---------------- slave processes ---------------- */
        /* First message: how many elements to expect. */
        MPI_Recv(&n_elements_received, 1, MPI_INT, 0, 0,
                 MPI_COMM_WORLD, &status);

        /* Second message: the array segment itself, into buffer a2. */
        MPI_Recv(a2, n_elements_received, MPI_INT, 0, 0,
                 MPI_COMM_WORLD, &status);

        int partial_sum = 0;
        for (int i = 0; i < n_elements_received; i++)
            partial_sum += a2[i];

        /* Report the partial sum back to the root. */
        MPI_Send(&partial_sum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }

    /* Clean up all MPI state before the process exits. */
    MPI_Finalize();
    return 0;
}

3. Broadcasting to all processes

1. MPI_Bcast
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<mpi.h>
/*
 * Broadcast a string from the root to every rank with MPI_Bcast,
 * then have every rank print what it holds.
 *
 * Fix over the original: the printf format string was split across two
 * source lines mid-literal (a syntax error from the document scrape);
 * it is rejoined here. The unused MPI_Status local is removed.
 */
int main(int argc, char **argv)
{
    char messg[20];
    int rank, size;
    int root = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == root)
    {
        strcpy(messg, "HELLO WORLD!");
    }
    /* 13 = strlen("HELLO WORLD!") + 1 for the terminating NUL. */
    MPI_Bcast(messg, 13, MPI_CHAR, root, MPI_COMM_WORLD);
    printf("Message from process %d sent by root process %d: %s\n",
           rank, root, messg);

    MPI_Finalize();
    return 0;
}

2. MPI_Ibcast
For MPI_Ibcast(), a wait function (MPI_Wait) is used to achieve synchronization.
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include "mpi.h"
/*
 * Non-blocking broadcast demo: MPI_Ibcast starts the broadcast and
 * MPI_Wait completes it. The root then overwrites its buffer to show
 * that the broadcast has already finished (%.13s prints only the
 * originally broadcast 13 bytes).
 *
 * Fix over the original: the printf format string was split across two
 * source lines mid-literal (a scrape-induced syntax error); rejoined.
 * The unused loop variable `i` is removed.
 */
int main(int argc, char **argv)
{
    char messg[20];
    int rank, size;
    MPI_Status status;
    MPI_Request request = MPI_REQUEST_NULL;
    int root = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == root)
    {
        strcpy(messg, "hello world!");
    }
    /* Start the broadcast; the buffer must not be touched until the
     * request completes. */
    MPI_Ibcast(messg, 13, MPI_CHAR, root, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, &status);

    /* Safe only because MPI_Wait above completed the operation. */
    if (rank == root)
    {
        strcpy(messg, "What will happen?");
    }
    printf("Message from process %d sent by root process %d:%.13s\n",
           rank, root, messg);

    MPI_Finalize();
    return 0;
}

3. Use your own broad_cast function built from MPI_Send and MPI_Recv.
#include<stdio.h>
#include<stdlib.h>
#include<mpi.h>
/*
 * Minimal broadcast built from point-to-point calls: the root sends the
 * buffer to every other rank; every other rank receives it from the root.
 * Same contract as MPI_Bcast, but O(P) sequential sends at the root.
 */
void broad_cast(void *data, int count, MPI_Datatype datatype, int root,
                MPI_Comm communicator)
{
    int me, nprocs;
    MPI_Comm_rank(communicator, &me);
    MPI_Comm_size(communicator, &nprocs);

    if (me != root)
    {
        /* Non-root ranks simply wait for the root's message. */
        MPI_Recv(data, count, datatype, root, 0, communicator,
                 MPI_STATUS_IGNORE);
        return;
    }

    /* Root: one send per peer, skipping itself. */
    for (int peer = 0; peer < nprocs; peer++)
    {
        if (peer != me)
        {
            MPI_Send(data, count, datatype, peer, 0, communicator);
        }
    }
}
/*
 * Driver for the hand-rolled broad_cast(): rank 0 broadcasts the
 * integer 100 and every other rank prints what it received.
 *
 * Fixes over the original: the receiver's printf format string was
 * split across two source lines mid-literal (a syntax error); the
 * root's message misspelled "process" as "proces".
 */
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    int data;

    if (world_rank == 0)
    {
        data = 100;
        printf("Root process %d broadcasts data %d\n", world_rank, data);
        broad_cast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    else
    {
        /* broad_cast fills `data` via MPI_Recv before we print it. */
        broad_cast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);
        printf("Process %d received data %d from root process\n",
               world_rank, data);
    }

    MPI_Finalize();
    return 0;
}

Use MPI_Gather to build an array of 30 elements, where p1 has a1[10], p2 has a2[10] and p3 has a3[10].

#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
/*
 * Gather demo: every rank fills a 10-element buffer (rand_nums[i] =
 * pid * i) and rank 0 gathers all contributions into enrand[].
 *
 * Fixes over the original:
 *  - rank 0's send buffer was never initialized, so the first 10
 *    gathered elements were garbage (undefined behavior);
 *  - enrand[30] overflows when run with more than 3 processes — now
 *    checked and aborted instead;
 *  - arrays are passed directly (not via `&array`) and main returns 0.
 */
int main(int argc, char **argv) {

    int pid, np;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    int rand_nums[10];
    int enrand[30];
    const int chunk = (int)(sizeof(rand_nums) / sizeof(rand_nums[0]));
    const int cap = (int)(sizeof(enrand) / sizeof(enrand[0]));

    /* The root's receive buffer holds np * chunk ints; abort rather
     * than overflow it when launched with too many processes. */
    if (np * chunk > cap) {
        if (pid == 0)
            fprintf(stderr, "Run with at most %d processes\n", cap / chunk);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Every rank — including 0 — must fill its send buffer.
     * For rank 0 this yields all zeros (0 * i). */
    for (int i = 0; i < chunk; i++) {
        rand_nums[i] = pid * i;
    }

    if (pid != 0) {
        printf("Numbers from process %d\n", pid);
        for (int i = 0; i < chunk; i++) {
            printf("%d ", rand_nums[i]);
        }
        printf("\n");
    }

    /* Collective: rank i's chunk lands at enrand[i * chunk]. */
    MPI_Gather(rand_nums, chunk, MPI_INT, enrand, chunk, MPI_INT, 0,
               MPI_COMM_WORLD);

    if (pid == 0) {
        printf("GATHERED ELEMENTS\n");
        /* Only np * chunk entries are valid, not the full capacity. */
        for (int i = 0; i < np * chunk; i++) {
            printf("Element at %d : %d\n", i, enrand[i]);
        }
    }

    MPI_Finalize();
    return 0;
}

MPI scatter and gather


/* NOTE(review): this fragment is incomplete — the enclosing function
 * header, MPI_Init, and the declarations of `rank` and `size` were
 * presumably lost when the document was scraped. Presumably it was a
 * main() that called MPI_Init and MPI_Comm_rank/MPI_Comm_size before
 * this point — confirm against the original source. Code left
 * byte-identical; comments only added. */
/* Root-side buffer: one int per process (assumes size <= 4 — verify). */
int globaldata[4];/* root's full array: one element per process */
int localdata;/* this rank's single scattered element */

int i;
if (rank == 0) {

/* Root fills globaldata with 0..size-1 and prints it. */
for (i=0; i<size; i++)


globaldata[i] = i;

printf("1. Processor %d has data: ", rank);


for (i=0; i<size; i++)
printf("%d ", globaldata[i]);
printf("\n");
}

/* Each rank (root included) receives one int from globaldata. */
MPI_Scatter(globaldata, 1, MPI_INT, &localdata, 1, MPI_INT, 0, MPI_COMM_WORLD);

printf("2. Processor %d has data %d\n", rank, localdata);

/* Every rank overwrites its element before gathering back. */
localdata= 5;
printf("3. Processor %d now has %d\n", rank, localdata);

/* Root collects the (now all-5) elements back into globaldata. */
MPI_Gather(&localdata, 1, MPI_INT, globaldata, 1, MPI_INT, 0, MPI_COMM_WORLD);

if (rank == 0) {
printf("4. Processor %d has data: ", rank);
for (i=0; i<size; i++)
printf("%d ", globaldata[i]);
printf("\n");
}

MPI_Finalize();
return 0;
}

Use Reduce to compute sum of 10 elements.

#include<stdio.h>
#include<stdlib.h>
#include<mpi.h>
/*
 * Reduce demo: each non-root rank builds 5 values, sums them locally,
 * and MPI_Reduce combines the local sums on rank 0.
 *
 * Fixes over the original (which did not compile — a closing brace was
 * missing):
 *  - MPI_Reduce sat inside `if (pid != 0)`, so rank 0 never joined the
 *    collective and the program deadlocked; the reduce is collective
 *    and every rank must call it;
 *  - `ls` and `gs` were read/accumulated while uninitialized.
 */
int main(int argc, char **argv) {
    int pid, np;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    int arr[5];
    int ls = 0; /* this rank's local sum (0 on the root, which holds no data) */
    int gs = 0; /* global sum; meaningful on rank 0 after the reduce */

    if (pid != 0)
    {
        for (int i = 0; i < 5; i++)
        {
            arr[i] = (pid) * 8 % ((i + 1) * 9);
            ls += arr[i];
        }
        printf("Elements in process %d\n", pid);
        for (int i = 0; i < 5; i++)
        {
            printf("%d ", arr[i]);
        }
        printf("\n");
    }

    /* Collective — every rank participates, root contributes ls == 0. */
    MPI_Reduce(&ls, &gs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (pid == 0)
    {
        printf("Total sum of 10 elements : %d\n", gs);
    }

    MPI_Finalize();
    return 0;
}

MPI reduce: further examples


Estimating the value of pi (Monte Carlo)
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<mpi.h>
/*
 * Monte Carlo estimate of pi: each rank throws `trials` random points
 * into the unit square and counts hits inside the quarter circle;
 * hit and trial counts are summed onto rank 0 with MPI_Reduce and
 * pi is estimated as 4 * hits / trials.
 *
 * Fixes over the original:
 *  - rand() was never seeded, so every rank drew the identical sample
 *    stream and extra processes added no statistical value — each rank
 *    now seeds with its own rank;
 *  - "Actual pi value" printed 22/7 (itself only an approximation);
 *    acos(-1.0) is used instead.
 */
int main(int argc, char **argv)
{
    int trials = 100000; /* samples per process */
    int rank;
    double x, y;
    int inside = 0;      /* hits inside the quarter circle */
    double pi;
    int total_trials = 0;
    int total_inside = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Distinct seed per rank so the processes sample independently. */
    srand((unsigned)(rank + 1));

    for (int i = 0; i < trials; ++i)
    {
        x = (double)rand() / RAND_MAX;
        y = (double)rand() / RAND_MAX;
        if (x * x + y * y <= 1)
        {
            inside++;
        }
    }

    /* Sum hit and trial counts from all ranks onto rank 0. */
    MPI_Reduce(&inside, &total_inside, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&trials, &total_trials, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        pi = ((double)total_inside / (double)total_trials) * 4.0;
        printf("Estimated pi value: %.4f\n", pi);
        printf("Actual pi value: %.4f\n", acos(-1.0));
    }

    MPI_Finalize();
    return 0;
}

Calculate the sum of n prime numbers

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
/* First n primes, generated identically on every rank. */
int prime_arr[100];
/* Retained from the original for source compatibility; no longer used
 * (the reduce previously aliased `s` with the communicator size). */
int s, ts;

/*
 * Reads n, generates the first n primes on every rank, then sums them
 * in parallel: each rank adds a strided subset and MPI_Reduce combines
 * the partial sums on rank 0.
 *
 * Fixes over the original:
 *  - every rank summed ALL n primes, so the reduce returned np times
 *    the true sum;
 *  - the reduce target was the same variable that held the
 *    communicator size;
 *  - the generation loop used `c <= n`, producing n+1 primes;
 *  - n was not validated against the 100-element buffer (overflow);
 *  - MPI_Finalize was never called.
 */
int main(int argc, char **argv)
{
    int n, i = 2;
    /* NOTE(review): every rank executes this scanf, but under mpirun
     * usually only rank 0 has stdin — kept to preserve the original
     * structure; a robust version would read on rank 0 before MPI_Init
     * or read on rank 0 and MPI_Bcast n. */
    printf("Enter number of prime numbers: ");
    if (scanf("%d", &n) != 1 || n < 1 || n > 100)
    {
        fprintf(stderr, "n must be between 1 and 100\n");
        return 1;
    }

    /* Trial division: fill prime_arr[0..n-1] with the first n primes. */
    int c = 0;
    while (c < n)
    {
        int composite = 0;
        for (int j = 2; j <= i / 2; j++)
        {
            if (i % j == 0)
            {
                composite = 1;
                break;
            }
        }
        if (!composite)
        {
            prime_arr[c++] = i;
        }
        i++;
    }

    int rank, nprocs;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* Each rank sums a disjoint strided slice, so the reduced total is
     * the sum of each prime exactly once. */
    int partial = 0;
    for (int m = rank; m < n; m += nprocs)
    {
        partial += prime_arr[m];
    }

    int total = 0;
    MPI_Reduce(&partial, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        for (int l = 0; l < n; l++)
        {
            printf("%d\n", prime_arr[l]);
        }
        printf("Sum of %d prime numbers : %d\n", n, total);
    }

    MPI_Finalize();
    return 0;
}

You might also like