MAP Lab Completed
EX.NO:1 WRITE A SIMPLE PROGRAM TO DEMONSTRATE OPENMP FORK-JOIN PARALLELISM.
DATE:
AIM:
To write a simple program to demonstrate OpenMP fork-join parallelism.
PROGRAM:
#include <stdio.h>
#include <omp.h>

int main(void)
{
    // Outside a parallel region only the master thread exists
    printf("Before: total thread number is %d\n", omp_get_num_threads());

    // Fork: a team of threads executes this block
    #pragma omp parallel
    {
        printf("Thread id is %d\n", omp_get_thread_num());
    }

    // Join: the team has disbanded and the master thread continues alone
    printf("After: total thread number is %d\n", omp_get_num_threads());
    return 0;
}
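A typical way to build and run the program is shown below (the source file name fork_join.c is illustrative); the thread ids printed inside the parallel region vary from run to run and with the OMP_NUM_THREADS setting:

gcc -fopenmp fork_join.c -o fork_join
./fork_join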
OUTPUT:
Result:
Thus the program has been executed successfully.
EX.NO:2 CREATE A PROGRAM THAT COMPUTES A SIMPLE MATRIX-VECTOR MULTIPLICATION B=Ax, EITHER IN C OR C++. USE OPENMP DIRECTIVES TO MAKE IT RUN IN PARALLEL.
DATE:
AIM:
To create a program that computes a simple matrix-vector multiplication and to use OpenMP directives to make it run in parallel.
PROGRAM:
#include <stdio.h>
#include <omp.h>

int main() {
    float A[2][2] = {{1,2},{3,4}};
    float b[] = {8,10};
    float c[2];
    int i, j;

    // Each row of the result is independent, so the outer loop is parallelised
    #pragma omp parallel for private(j)
    for (i = 0; i < 2; i++) {
        c[i] = 0;
        for (j = 0; j < 2; j++)
            c[i] += A[i][j] * b[j];
    }

    for (i = 0; i < 2; i++)
    {
        printf("c[%i]=%f \n", i, c[i]);
    }
    return 0;
}
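With the sample data above, the expected result is c[0] = 1*8 + 2*10 = 28 and c[1] = 3*8 + 4*10 = 58; the values are the same for any number of threads, since each row of the result is computed independently.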
OUTPUT:
Result:
Thus the program has been executed successfully.
EX.NO:3 CREATE A PROGRAM THAT COMPUTES THE SUM OF ALL THE ELEMENTS IN AN ARRAY A (C/C++). USE OPENMP DIRECTIVES TO MAKE IT RUN IN PARALLEL.
DATE:
AIM:
To create a program that computes the sum of all the elements in an array and to use OpenMP directives to make it run in parallel.
ALGORITHM:
Step 1: Start
Step 2: Creation of a program for computing the sum of all the elements in an array.
Step 3: Input the array elements.
Step 4: Perform the addition.
Step 5: Print the resultant sum.
Step 6: Stop.
PROGRAM:
#include <omp.h>
#include <bits/stdc++.h>
using namespace std;

int main() {
    vector<int> arr{3, 1, 2, 5, 4, 0};
    queue<int> data;
    int arr_sum = accumulate(arr.begin(), arr.end(), 0);  // sequential reference sum
    int arr_size = arr.size();
    int x, y;

    // Load the elements into a queue so that pairs can be combined step by step
    for (int i = 0; i < arr_size; i++) {
        data.push(arr[i]);
    }

    omp_set_num_threads(ceil(arr_size / 2.0));
    #pragma omp parallel
    {
        // One thread at a time combines the two front elements until a single value remains
        #pragma omp critical
        {
            while (data.size() > 1) {
                x = data.front();
                data.pop();
                y = data.front();
                data.pop();
                data.push(x + y);
            }
        }
    }

    cout << "Sum: " << data.front() << " (expected " << arr_sum << ")" << endl;
    return 0;
}
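The critical section above lets only one thread at a time perform the additions, so the work is effectively serialised. A commonly used alternative, sketched here with the same array, is OpenMP's reduction clause, which gives each thread a private partial sum and combines the partial sums automatically at the end of the loop:

#include <stdio.h>
#include <omp.h>

int main(void)
{
    int arr[] = {3, 1, 2, 5, 4, 0};
    int n = sizeof(arr) / sizeof(arr[0]);
    int sum = 0;
    int i;

    // Each thread accumulates a private partial sum; OpenMP combines them at the end
    #pragma omp parallel for reduction(+ : sum)
    for (i = 0; i < n; i++) {
        sum += arr[i];
    }

    printf("Sum: %d\n", sum);
    return 0;
}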
OUTPUT:
Array of elements: 1 5 7 9 11
Sum: 33
Result:
Thus the program has been executed successfully.
EX.NO:4 WRITE A SIMPLE PROGRAM DEMONSTRATING MESSAGE-PASSING LOGIC USING OPENMP.
DATE:
AIM:
To write a simple program demonstrating message-passing logic using OpenMP.
ALGORITHM:
Step 1: Start
Step 2: Creation of a simple program demonstrating message-passing logic.
Step 3: Create the message to be transmitted.
Step 4: Input the message.
Step 5: Process and print the result.
Step 6: Stop
PROGRAM:
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
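Only the header includes of the listing survive here. The sketch below shows one way message-passing logic can be expressed with OpenMP threads, consistent with the "Hello World" output further down; the shared message variable, the two-thread team and the barrier-based hand-off are illustrative assumptions, not part of the original listing:

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *message = NULL;   // shared "mailbox" between the threads

    #pragma omp parallel num_threads(2) shared(message)
    {
        int id = omp_get_thread_num();

        if (id == 0) {
            // "Send": thread 0 places the message in the shared variable
            message = "Hello World";
        }

        // The barrier guarantees the message is written before it is read
        #pragma omp barrier

        if (id == 1) {
            // "Receive": thread 1 reads the shared variable and prints it
            printf("%s\n", message);
        }
    }
    return 0;
}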
OUTPUT:
Hello World
Result:
Thus the program has been executed successfully.
EX.NO:5 IMPLEMENT THE ALL-PAIRS SHORTEST-PATH PROBLEM (FLOYD'S ALGORITHM) USING OPENMP
DATE:
AIM:
To implement the all-pairs shortest-path problem (Floyd's algorithm) using OpenMP.
PROGRAM:
// Define a minimum function that will be used later on to calculate minimum values
// between two numbers
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
{
int nthreads;
int src, dst, middle;
// ... (the middle of the listing is missing here; a complete sketch follows below) ...
}
}
double time = omp_get_wtime() - start_time;
printf("Total time for thread %d (in sec): %.2f\n", nthreads, time);
}
return 0;
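Only fragments of the listing survive above. The following is a minimal self-contained sketch of a parallel Floyd-Warshall program written in the same style; the 5x5 example matrix, the INF sentinel value and the fixed thread count are illustrative assumptions:

#include <stdio.h>
#include <omp.h>

#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif

#define N 5
#define INF 9999

int main(void)
{
    // Illustrative adjacency matrix: INF means "no direct edge"
    int dist[N][N] = {
        {0,   4,   INF, 5,   INF},
        {INF, 0,   1,   INF, 6},
        {2,   INF, 0,   3,   INF},
        {INF, INF, 1,   0,   2},
        {1,   INF, INF, 4,   0}
    };
    int src, dst, middle;
    int nthreads = 4;                 // illustrative thread count

    omp_set_num_threads(nthreads);
    double start_time = omp_get_wtime();

    for (middle = 0; middle < N; middle++) {
        // For a fixed intermediate vertex, every (src, dst) pair can be relaxed independently
        #pragma omp parallel for private(dst)
        for (src = 0; src < N; src++) {
            for (dst = 0; dst < N; dst++) {
                dist[src][dst] = min(dist[src][dst],
                                     dist[src][middle] + dist[middle][dst]);
            }
        }
    }

    double time = omp_get_wtime() - start_time;
    printf("Total time for thread %d (in sec): %.2f\n", nthreads, time);

    // Print the matrix of all-pairs shortest paths
    for (src = 0; src < N; src++) {
        for (dst = 0; dst < N; dst++)
            printf("%d ", dist[src][dst]);
        printf("\n");
    }
    return 0;
}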
OUTPUT:
Matrix of all pair shortest path.
0 3 4 5 6 7 7
3 0 2 1 3 4 4
4 2 0 1 3 2 3
5 1 1 0 2 3 3
6 3 3 2 0 2 1
7 4 2 3 2 0 1
7 4 3 3 1 1 0
Result:
Thus the program has been executed successfully.
EX.NO:6 IMPLEMENT A PROGRAM FOR PARALLEL RANDOM NUMBER GENERATORS USING MONTE CARLO METHODS IN OPENMP
DATE:
AIM:
To implement a program for parallel random number generators using Monte Carlo methods in OpenMP.
PROGRAM:
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Function to find the estimated value of PI using the Monte Carlo method,
// with N points per thread and K threads
void monteCarlo(int N, int K)
{
    // Stores the X and Y coordinates of a random point
    double x, y;
    // Stores the distance of the point from the origin
    double d;
    // Stores the number of points lying inside the circle
    int pCircle = 0;
    // Stores the number of points lying inside the square
    int pSquare = 0;
    int i = 0;

    // Each thread counts its own points; the reduction combines the per-thread counts
    #pragma omp parallel private(x, y, d, i) reduction(+ : pCircle, pSquare) num_threads(K)
    {
        // Per-thread seed so every thread draws a different random sequence
        unsigned int seed = (unsigned int)time(NULL) ^ (unsigned int)omp_get_thread_num();
        for (i = 0; i < N; i++) {
            // Random point (x, y) inside the unit square
            x = rand_r(&seed) / (double)RAND_MAX;
            y = rand_r(&seed) / (double)RAND_MAX;
            // Squared distance of the point from the origin
            d = (x * x) + (y * y);
            // If d is less than or equal to 1, the point lies inside the circle
            if (d <= 1) {
                // Increment pCircle by 1
                pCircle++;
            }
            // Increment pSquare by 1
            pSquare++;
        }
    }

    // Stores the estimated value of PI
    double pi = 4.0 * ((double)pCircle / (double)(pSquare));
    printf("Final Estimation of Pi = %f\n", pi);
}

// Driver Code
int main()
{
    // Input
    int N = 100000;
    int K = 8;

    // Function call
    monteCarlo(N, K);
    return 0;
}
OUTPUT:
Final Estimation of Pi = 3.1320757
Result:
Thus the program has been executed successfully.
EX.NO:7 WRITE A PROGRAM TO DEMONSTRATE MPI-BROADCAST-AND-COLLECTIVE-COMMUNICATION IN C
DATE:
AIM:
To write a program to demonstrate MPI-broadcast-and-collective
communication in C.
ALGORITHM:
Step 1: Start
Step 2: Get the values for broadcasting.
Step 3: Process the values using MPI broadcast and collective communication.
Step 4: Print the output
Step 5: Stop
PROGRAM:
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    int rank;
    int buf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        buf = 777;              // the root sets the value to broadcast
    }

    // MPI_Bcast is collective: every rank in the communicator must call it
    MPI_Bcast(&buf, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        printf("rank %d received %d\n", rank, buf);
    }

    MPI_Finalize();
    return 0;
}
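The MPI programs in this and the following exercises are typically built with the mpicc wrapper and launched with mpirun; the source file name bcast.c and the process count of 4 below are illustrative:

mpicc bcast.c -o bcast
mpirun -np 4 ./bcast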
OUTPUT:
Result:
Thus the program has been executed successfully.
EX.NO:8 WRITE A PROGRAM TO DEMONSTRATE MPI-SCATTER-GATHER-AND-ALLGATHER IN C
DATE:
AIM:
To write a program to demonstrate MPI-scatter-gather-and-all gather.
ALGORITHM:
Step 1: Start
Step 2: Get an array of random numbers as input.
Step 3: Compute the average of the array of numbers.
Step 4: Process and print the result.
Step 5: Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <assert.h>

// Creates an array of random numbers. Each number has a value from 0 - 1
float *create_rand_nums(int num_elements) {
    float *rand_nums = (float *)malloc(sizeof(float) * num_elements);
    assert(rand_nums != NULL);
    int i;
    for (i = 0; i < num_elements; i++) {
        rand_nums[i] = (rand() / (float)RAND_MAX);
    }
    return rand_nums;
}

// Computes the average of an array of numbers
float compute_avg(float *array, int num_elements) {
    float sum = 0.f;
    int i;
    for (i = 0; i < num_elements; i++) {
        sum += array[i];
    }
    return sum / num_elements;
}

int main(int argc, char** argv) {
    // Number of elements handled by each process (chosen here for illustration)
    int num_elements_per_proc = 100;
    srand(time(NULL));

    MPI_Init(NULL, NULL);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Create an array of random numbers on the root process
    float *rand_nums = NULL;
    if (world_rank == 0) {
        rand_nums = create_rand_nums(num_elements_per_proc * world_size);
    }

    // For each process, create a buffer that will hold a subset of the entire
    // array
    float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc);
    assert(sub_rand_nums != NULL);

    // Scatter the random numbers from the root process to all processes in
    // the MPI world
    MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
                num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);

    // Compute the average of this process's subset
    float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc);

    // Gather all partial averages down to all the processes
    float *sub_avgs = (float *)malloc(sizeof(float) * world_size);
    assert(sub_avgs != NULL);
    MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT,
                  MPI_COMM_WORLD);

    // Every process now holds all partial averages and can compute the overall average
    float avg = compute_avg(sub_avgs, world_size);
    printf("Avg of all elements from process %d is %f\n", world_rank, avg);

    // Clean up
    if (world_rank == 0) {
        free(rand_nums);
    }
    free(sub_avgs);
    free(sub_rand_nums);

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
}
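The listing uses MPI_Allgather, which leaves the gathered partial averages on every process. The plain gather named in the title differs only in that the result lands on a single root process; the two calls are contrasted below, reusing the variable names from the listing above:

MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, MPI_COMM_WORLD);   // every rank receives all partial averages
MPI_Gather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);   // only rank 0 receives the partial averages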
OUTPUT:
Result:
Thus the program has been executed successfully.
EX.NO:9 WRITE A PROGRAM TO DEMONSTRATE MPI-SEND-AND-RECEIVE IN C
DATE:
AIM:
To write a program to demonstrate MPI send and receive in C.
ALGORITHM:
Step 1: Start
Step 2: Initialize MPI and get the rank of each process.
Step 3: Process 0 sends the data to process 1 using MPI_Send.
Step 4: Process 1 receives the data using MPI_Recv.
Step 5: Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, tag = 0;
    int *array = NULL;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    array = malloc(10 * sizeof(int));   // Array of 10 elements
    if (!array)                         // error checking
    {
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 0)
    {
        MPI_Send(array, 10, MPI_INT, 1, tag, MPI_COMM_WORLD);
    }
    if (rank == 1)
    {
        MPI_Recv(array, 10, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        // more code here
    }

    free(array);
    MPI_Finalize();
    return 0;
}
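The sample output below transfers a single integer (-1) rather than the 10-element array above; a minimal sketch of such a variant, using the same send/receive pattern, might look like this:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, number;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        number = -1;
        MPI_Send(&number, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&number, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Process 1 received number %d from process 0\n", number);
    }

    MPI_Finalize();
    return 0;
}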
OUTPUT:
>>> ./run.py send_recv
mpirun -n 2 ./send_recv
Process 1 received number -1 from process 0
Result:
Thus the program has been executed successfully.
EX.NO:10 WRITE A PROGRAM TO DEMONSTRATE PERFORMING-PARALLEL-RANK-WITH-MPI IN C
DATE:
AIM:
To write a program to demonstrate performing parallel rank with MPI in C.
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "tmpi_rank.h"
#include <time.h>

int main(int argc, char** argv)
{
    MPI_Init(NULL, NULL);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Seed the random number generator to get different results each time
    srand(time(NULL) * world_rank);

    // ... generate a random value and rank it across all processes
    // (the missing step; see the sketch below) ...

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
}
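The step that generates a value and ranks it is missing from the listing above; a sketch of what it might look like follows. TMPI_Rank is the helper declared in tmpi_rank.h for this exercise, and the signature used here is assumed, not taken from the original listing:

    // Generate one random value on every process
    float rand_num = rand() / (float)RAND_MAX;

    // Assumed helper from tmpi_rank.h: returns, for each process, the position
    // of its value in the sorted order of all processes' values
    int rank;
    TMPI_Rank(&rand_num, &rank, MPI_FLOAT, MPI_COMM_WORLD);

    printf("Rank for %f on process %d - %d\n", rand_num, world_rank, rank);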
OUTPUT:
>>> ./run.py random_rank
mpirun -n 4 ./random_rank 100
Rank for 0.242578 on process 0 – 0
Rank for 0.894732 on process 1 – 3
Rank for 0.789463 on process 2 – 2
Rank for 0.684195 on process 3 – 1
Result:
Thus the program has been executed successfully.