Assignment 04 – MPI
PRABHAHARAN NM
2021103556
BE CSE N BATCH
Q1: Non-blocking Round Robin
Generate a random number at process 0 and send it to process 1. Forward the received value to process 2 and, while the send is in flight, compute (rank + received value) and print it (hint: use a nonblocking call). Continue in the same way up to process 2; process 2 sends the value back to process 0, which likewise computes (rank + received value) and prints it.
C PROGRAM: round.c
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <time.h>
int main(int argc, char* argv[]) {
    int rank, size;
    int number;                  /* value generated and sent by process 0 */
    int received;                /* value received from the previous process */
    int send_to, recv_from;
    MPI_Request request;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size != 3) {
        if (rank == 0) printf("Needs 3 processes, found %d.\n", size);
        MPI_Finalize();
        return 1;
    }

    send_to = (rank + 1) % 3;    /* next rank in the ring */
    recv_from = (rank + 2) % 3;  /* previous rank; (rank-1)%3 would go negative for rank 0 */

    if (rank == 0) {
        srand(time(NULL));
        number = rand() % 100;   /* random number between 0 and 99 */
        printf("Process %d generated number %d\n", rank, number);
        MPI_Isend(&number, 1, MPI_INT, send_to, 0, MPI_COMM_WORLD, &request);
        /* nonblocking send to the next process */
    }

    /* receive from the previous process into a separate buffer, so rank 0
       does not overwrite the buffer of its still-pending Isend */
    MPI_Recv(&received, 1, MPI_INT, recv_from, 0, MPI_COMM_WORLD, &status);
    received += rank;            /* calculate rank + received value */

    if (rank != 0) {
        MPI_Isend(&received, 1, MPI_INT, send_to, 0, MPI_COMM_WORLD, &request);
        /* nonblocking send of the computed value to the next process */
    }

    printf("Process %d received number %d and calculated %d\n",
           rank, received - rank, received);

    MPI_Wait(&request, &status); /* complete the send before exiting */
    MPI_Finalize();
    return 0;
}
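MPI_Isend returns before the transfer completes, which is what lets the (rank + received value) computation proceed while the message is in flight. To make that overlap explicit, the pending send could be polled with MPI_Test instead of ending with a single MPI_Wait; a minimal sketch, where do_local_work() is a hypothetical placeholder and not part of the assignment:

int flag = 0;
MPI_Isend(&received, 1, MPI_INT, send_to, 0, MPI_COMM_WORLD, &request);
while (!flag) {
    do_local_work();                     /* hypothetical: computation overlapped with the send */
    MPI_Test(&request, &flag, &status);  /* flag becomes 1 once the send has completed */
}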
round.sh:
#!/bin/sh
#SBATCH -N 8
#SBATCH --ntasks-per-node=48
#SBATCH --time=06:00:00
#SBATCH --job-name=roundrobin
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --partition=standard
module load compiler/intel/2018.2.199
export I_MPI_FABRICS=shm:dapl
cd $SLURM_SUBMIT_DIR
mpiexec.hydra -n 3 ./round
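Assuming a standard SLURM setup, the job would be submitted from the login node with:

sbatch round.sh

stdout and stderr then go to the job.<jobid>.out and job.<jobid>.err files named by the --output and --error directives; the same applies to the scripts for Q2 and Q3 below.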
Output:
mpicc -o round round.c && mpirun -np 3 ./round
Process 1 received number 43 and calculated 44
Process 0 generated number 43
Process 2 received number 44 and calculated 46
Process 0 received number 46 and calculated 46
Q2: Send to many
Spawn 5 processes. Create an array with 4 elements at process 0 and send a single element of the array to every other process (the 0th element to process 1, the 1st element to process 2, and so on up to process 4).
Send2many.c:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <time.h>
int main(int argc, char* argv[]) {
    int rank, size;
    /* index 0 is a dummy element that the root keeps for itself, so that
       ranks 1..4 receive elements 1..4 (avoids the out-of-bounds
       &sendbuf[-1] trick, which is undefined behaviour in C) */
    int sendbuf[5] = {0, 1, 2, 3, 4};
    int recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size != 5) {
        if (rank == 0) printf("Needs 5 processes, found %d.\n", size);
        MPI_Finalize();
        return 1;
    }

    if (rank == 0) {
        printf("Rank: 0 \t Sent: [1,2,3,4]\n");
    }

    /* one element per rank; rank 0 receives the dummy 0 */
    MPI_Scatter(sendbuf, 1, MPI_INT, &recvbuf, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        printf("Rank: %d \t Received: %d\n", rank, recvbuf); /* print on the non-zero ranks */
    }

    MPI_Finalize();
    return 0;
}
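MPI_Scatter with a dummy element is one way to skip rank 0; since the question literally says "send a single element to every other process", the same distribution could also be done with plain point-to-point calls. A minimal sketch of that alternative, using the original four-element array:

int arr[4] = {1, 2, 3, 4};
if (rank == 0) {
    for (int i = 1; i < size; i++)       /* size is 5 here */
        MPI_Send(&arr[i - 1], 1, MPI_INT, i, 0, MPI_COMM_WORLD);
} else {
    MPI_Recv(&recvbuf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}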
Send2many.sh:
#!/bin/sh
#SBATCH -N 8
#SBATCH --ntasks-per-node=48
#SBATCH --time=06:00:00
#SBATCH --job-name=send2many
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --partition=standard
module load compiler/intel/2018.2.199
export I_MPI_FABRICS=shm:dapl
cd $SLURM_SUBMIT_DIR
mpiexec.hydra -n 5 ./send2many
Output:
mpicc -o send2many send2many.c && mpirun -np 5 ./send2many
Rank: 0    Sent: [1,2,3,4]
Rank: 1    Received: 1
Rank: 4    Received: 4
Rank: 2    Received: 2
Rank: 3    Received: 3
Q3: Reduce
Scatter one element of the array to every other process as in Q2, then use MPI_Reduce to compute the sum of the received values at process 0.
reduce.c:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char* argv[]) {
    int rank, size;
    /* index 0 is a dummy element kept by the root, as in Q2 */
    int sendbuf[5] = {0, 1, 2, 3, 4};
    int recvbuf, sum;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size != 5) {
        if (rank == 0) printf("Needs 5 processes, found %d.\n", size);
        MPI_Finalize();
        return 1;
    }

    if (rank == 0) {
        printf("Rank: 0 \t Sent: [1,2,3,4]\n");
    }

    MPI_Scatter(sendbuf, 1, MPI_INT, &recvbuf, 1, MPI_INT, 0, MPI_COMM_WORLD);

    /* each rank contributes its single received element, so the count is 1
       (a count of 4 would read and write past the one-int buffers) */
    MPI_Reduce(&recvbuf, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        printf("Rank: %d \t Received: %d\n", rank, recvbuf);
    } else {
        printf("Rank: 0 \t Reduced sum: %d\n", sum);
    }

    MPI_Finalize();
    return 0;
}
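Here the reduced sum is 0 + 1 + 2 + 3 + 4 = 10, since rank 0 contributes its dummy 0. If every rank, not only rank 0, needed the result, MPI_Allreduce would replace the MPI_Reduce call; a minimal sketch:

/* every rank receives the same sum; no root argument is needed */
MPI_Allreduce(&recvbuf, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);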
reduce.sh:
#!/bin/sh
#SBATCH -N 8
#SBATCH --ntasks-per-node=48
#SBATCH --time=06:00:00
#SBATCH --job-name=reduce
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --partition=standard
module load compiler/intel/2018.2.199
export I_MPI_FABRICS=shm:dapl
cd $SLURM_SUBMIT_DIR
mpiexec.hydra -n 5 ./reduce
Output:
mpicc -o reduce reduce.c && mpirun -np 5 ./reduce
Rank: 4    Received: 4
Rank: 0    Sent: [1,2,3,4]
Rank: 3    Received: 3
Rank: 1    Received: 1
Rank: 2    Received: 2
Rank: 0    Reduced sum: 10