100% found this document useful (1 vote)
272 views5 pages

Parallel and Distributed Computing CSE4001 Lab - 4

This document contains C code examples using OpenMP for parallel programming. The first example uses OpenMP to calculate the dot product of two vectors in parallel. The second example implements loop work sharing to calculate the sum of two arrays in parallel. The third example uses OpenMP sections to have different threads calculate the sum and product of two arrays concurrently.

Uploaded by

Sasank Chunduri
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as ODT, PDF, TXT or read online on Scribd
100% found this document useful (1 vote)
272 views5 pages

Parallel and Distributed Computing CSE4001 Lab - 4

This document contains C code examples using OpenMP for parallel programming. The first example uses OpenMP to calculate the dot product of two vectors in parallel. The second example implements loop work sharing to calculate the sum of two arrays in parallel. The third example uses OpenMP sections to have different threads calculate the sum and product of two arrays concurrently.

Uploaded by

Sasank Chunduri
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as ODT, PDF, TXT or read online on Scribd
You are on page 1/ 5

Parallel and Distributed Computing CSE4001

Lab – 4
Sasank Chunduri
17BCE0523

4A. Write a C program using OpenMP to find dot product.

Code:
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include<malloc.h>

#define VECLEN 100


#define NUMTHREADS 8

/*
 * 4A: Parallel dot product with OpenMP.
 *
 * Fills two vectors of length VECLEN*NUMTHREADS with 1.0 and computes
 * their dot product inside a parallel region.  The reduction(+:sum)
 * clause gives each thread a private accumulator that OpenMP combines
 * into the shared `sum` at the end of the worksharing loop; `psum`
 * snapshots each thread's private running total so it can be printed.
 *
 * Returns 0 on success, EXIT_FAILURE if allocation fails.
 */
int main (int argc, char* argv[]) {

int i, tid, len=VECLEN, threads=NUMTHREADS;
double *a, *b;
double sum, psum;

printf("Starting omp_dotprod_openmp. Using %d threads\n",threads);

/* sizeof *ptr keeps the element size tied to the pointer's type. */
a = malloc (len*threads * sizeof *a);
b = malloc (len*threads * sizeof *b);

/* The original dereferenced these unconditionally; a failed malloc
 * would have crashed.  free(NULL) is a no-op, so no guard is needed. */
if (a == NULL || b == NULL) {
    fprintf(stderr, "malloc failed\n");
    free(a);
    free(b);
    return EXIT_FAILURE;
}

for (i=0; i<len*threads; i++) {
    a[i]=1.0;
    b[i]=a[i];
}
sum = 0.0;

#pragma omp parallel private(i,tid,psum) num_threads(threads)
{
    psum = 0.0;
    tid = omp_get_thread_num();

    /* Each thread accumulates into its private reduction copy of `sum`;
     * psum tracks that private copy so the partial sum can be reported. */
    #pragma omp for reduction(+:sum)
    for (i=0; i<len*threads; i++) {
        sum += (a[i] * b[i]);
        psum = sum;
    }
    printf("Thread %d partial sum = %f\n",tid, psum);
}

printf ("Done. OpenMP version: sum = %f \n", sum);

free (a);
free (b);
return 0;
}
Output:

4B. Write a C program with OpenMP to implement Loop Work Sharing.

Code:
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100

/*
 * 4B: Loop work sharing with OpenMP.
 *
 * Initializes two arrays, then distributes the element-wise addition
 * across threads with a dynamically scheduled worksharing loop.
 * Thread 0 reports the team size; every thread announces itself and
 * prints each element it computes.
 */
int main (int argc, char *argv[])
{
int thread_count, my_id, idx, chunk_size;
float x[N], y[N], z[N];

/* Seed both input arrays with the index value. */
for (idx = 0; idx < N; idx++)
    x[idx] = y[idx] = idx * 1.0;
chunk_size = CHUNKSIZE;

#pragma omp parallel shared(x,y,z,thread_count,chunk_size) private(idx,my_id)
{
    my_id = omp_get_thread_num();

    /* Only the master thread queries and reports the team size. */
    if (my_id == 0)
    {
        thread_count = omp_get_num_threads();
        printf("Number of threads = %d\n", thread_count);
    }
    printf("Thread %d starting...\n", my_id);

    /* Dynamic scheduling hands out chunks of chunk_size iterations
     * to whichever thread is free next. */
    #pragma omp for schedule(dynamic,chunk_size)
    for (idx = 0; idx < N; idx++)
    {
        z[idx] = x[idx] + y[idx];
        printf("Thread %d: c[%d]= %f\n", my_id, idx, z[idx]);
    }
}
return 0;
}

Output:
4C. Write a C program with OpenMP to implement section work sharing.

Code:

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50

/*
 * 4C: Section work sharing with OpenMP.
 *
 * Two independent computations — element-wise sum and element-wise
 * product of the same inputs — are placed in separate `section`
 * constructs so distinct threads can run them concurrently.  The
 * `nowait` clause lets threads skip the implicit barrier at the end
 * of the sections construct and print their done message immediately.
 */
int main (int argc, char *argv[])
{
int idx, thread_count, my_id;
float src1[N], src2[N], sums[N], prods[N];

/* Populate the inputs and zero the result arrays. */
for (idx = 0; idx < N; idx++)
{
    src1[idx] = idx * 1.5;
    src2[idx] = idx + 22.35;
    sums[idx] = prods[idx] = 0.0;
}

#pragma omp parallel shared(src1,src2,sums,prods,thread_count) private(idx,my_id)
{
    my_id = omp_get_thread_num();

    /* Master thread reports how many threads joined the team. */
    if (my_id == 0)
    {
        thread_count = omp_get_num_threads();
        printf("Number of threads = %d\n", thread_count);
    }
    printf("Thread %d starting...\n", my_id);

    #pragma omp sections nowait
    {
        /* First unit of independent work: the element-wise sum. */
        #pragma omp section
        {
            printf("Thread %d doing section 1\n", my_id);
            for (idx = 0; idx < N; idx++)
            {
                sums[idx] = src1[idx] + src2[idx];
                printf("Thread %d: c[%d]= %f\n", my_id, idx, sums[idx]);
            }
        }

        /* Second unit of independent work: the element-wise product. */
        #pragma omp section
        {
            printf("Thread %d doing section 2\n", my_id);
            for (idx = 0; idx < N; idx++)
            {
                prods[idx] = src1[idx] * src2[idx];
                printf("Thread %d: d[%d]= %f\n", my_id, idx, prods[idx]);
            }
        }
    }
    printf("Thread %d done.\n", my_id);
}
return 0;
}

Output:

You might also like