HPC Programs
1.
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Main Program */
int main()
{
int NoofRows, NoofCols, Vectorsize, i, j;
float** Matrix, * Vector, * Result, * Checkoutput;
printf("Enter the number of rows, the number of columns and the vector size\n");
scanf("%d %d %d", &NoofRows, &NoofCols, &Vectorsize);   /* assumed: sizes read from stdin */
if (NoofCols != Vectorsize) {
printf("Matrix-vector computation is not possible: columns must equal the vector size\n");
exit(1);
}
/* Dynamic Memory Allocation And Initialization Of Matrix Elements */
printf("\n");
/* vector Initialization */
/* Serial Computation */
free(Vector);
free(Result);
free(Matrix);
free(Checkoutput);
}
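The allocation, initialization, and matrix-vector product bodies are elided in the listing above. A minimal sketch of what those sections typically contain is given below, reusing the names declared in the program; the row-wise malloc scheme, the initial values, and the OpenMP work-sharing placement are assumptions, not part of the original listing.

/* sketch of the elided sections of program 1 (assumed layout) */
Matrix = (float**)malloc(NoofRows * sizeof(float*));
for (i = 0; i < NoofRows; i++) {
Matrix[i] = (float*)malloc(NoofCols * sizeof(float));
for (j = 0; j < NoofCols; j++)
Matrix[i][j] = i + j;                     /* arbitrary initial values */
}
Vector = (float*)malloc(Vectorsize * sizeof(float));
for (i = 0; i < Vectorsize; i++)
Vector[i] = i + 1;
Result = (float*)malloc(NoofRows * sizeof(float));
Checkoutput = (float*)malloc(NoofRows * sizeof(float));
/* parallel matrix-vector product (assumed OpenMP placement) */
#pragma omp parallel for private(j)
for (i = 0; i < NoofRows; i++) {
Result[i] = 0.0f;
for (j = 0; j < NoofCols; j++)
Result[i] = Result[i] + Matrix[i][j] * Vector[j];
}
/* serial computation used to check the parallel result */
for (i = 0; i < NoofRows; i++) {
Checkoutput[i] = 0.0f;
for (j = 0; j < NoofCols; j++)
Checkoutput[i] = Checkoutput[i] + Matrix[i][j] * Vector[j];
}

With the row-wise allocation above, each Matrix[i] should also be freed before the final free(Matrix).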
2.
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Main Program */
int main()
{
float* Array, * Check, serial_sum, sum, partialsum;
int array_size, i;
printf("Enter the size of the array\n");
scanf("%d", &array_size);   /* assumed: size read from stdin */
if (array_size <= 0) {
printf("Array Size Should Be Of Positive Value \n");
exit(1);
}
/* Dynamic Memory Allocation */
sum = 0.0;
partialsum = 0.0;
serial_sum = 0.0;
/* Serial Calculation */
for (i = 0; i < array_size; i++)
serial_sum = serial_sum + Check[i];
if (serial_sum == sum)
printf("\nThe Serial And Parallel Sums Are Equal\n");
else {
printf("\nThe Serial And Parallel Sums Are UnEqual\n");
exit(1);
}
/* Freeing Memory */
free(Check);
free(Array);
}
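The dynamic allocation and the OpenMP summation are elided in the listing above. One common way to fill them in, mirroring the sum and partialsum variables already declared, is sketched below; the initial array values and the critical-section reduction are assumptions.

/* sketch of the elided sections of program 2 (assumed layout) */
Array = (float*)malloc(array_size * sizeof(float));
Check = (float*)malloc(array_size * sizeof(float));
for (i = 0; i < array_size; i++) {
Array[i] = i * 1.0f;
Check[i] = Array[i];                /* copy used by the serial check */
}
/* each thread accumulates a partial sum, then adds it to the global sum under a critical section */
#pragma omp parallel private(partialsum)
{
partialsum = 0.0;
#pragma omp for
for (i = 0; i < array_size; i++)
partialsum = partialsum + Array[i];
#pragma omp critical
sum = sum + partialsum;
}

Because floating-point addition is not associative, the parallel and serial sums can disagree in the last bits, so a tolerance-based comparison is more robust than the exact equality test used in the listing.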
3.
#include <iostream>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define MAX_THREADS 2
int main()
{
int i, j;
double x;
double pi, sum = 0.0;
double start, delta;
sum = 0.0;
start = omp_get_wtime();
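The listing breaks off after the timer is started. The declarations above (x, sum, pi, delta, MAX_THREADS) point to a midpoint-rule computation of pi; a sketch of that continuation is given below, where the step count, the reduction clause, and the final print are assumptions rather than recovered text.

/* sketch of the remainder of program 3 (assumed) */
long steps = 1000000;                            /* assumed number of rectangles */
double step = 1.0 / (double)steps;
#pragma omp parallel for reduction(+:sum) private(x) num_threads(MAX_THREADS)
for (i = 0; i < steps; i++) {
x = (i + 0.5) * step;                            /* midpoint of rectangle i */
sum += 4.0 / (1.0 + x * x);
}
pi = step * sum;
delta = omp_get_wtime() - start;
printf("PI = %.16g computed in %.4g seconds\n", pi, delta);
return 0;
}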
4.
#include <iostream>
#include<stdio.h>
#include<omp.h>
int fib(int n)
{
if (n < 2) return n;
else return fib(n - 1) + fib(n - 2);
}
int main()
{
int fibnumber[100], i, j, n;
printf("Please Enter the series limit\n");
scanf_s("%d", &n);
if (n > 100) n = 100;   /* keep the series within the bounds of fibnumber[100] */
#pragma omp parallel num_threads(2)
{
#pragma omp critical   /* serializes the two branches, but does not guarantee which thread runs first */
if (omp_get_thread_num() == 0)
{
printf("There are %d threads\n", omp_get_num_threads());
printf("Thread %d generating numbers..\n", omp_get_thread_num());
for (i = 0;i < n;i++)
fibnumber[i] = fib(i);
}
else
{
printf("Thread %d Printing numbers..\n", omp_get_thread_num());
for (j = 0;j < n;j++)
printf("%d\t", fibnumber[j]);
}
}
return 0;
}
5.
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Main Program */
int main()
{
int i, N;
float* array, * check;
/* Size Of An Array */
printf("Enter the size of the array\n");
scanf("%d", &N);   /* assumed: size read from stdin */
if (N <= 0) {
printf("Array Size Should Be A Positive Value \n");
exit(1);
}
/* Dynamic Memory Allocation */
/* Serial Calculation */
/* Output Checking */
printf("\n");
/* Freeing The Memory */
free(array);
free(check);
}
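The allocation, the parallel calculation, and the output check are elided above, and the listing does not say which element-wise operation the program performs. The sketch below uses squaring of each element purely as a stand-in; that operation and the OpenMP directive placement are assumptions.

/* sketch of the elided sections of program 5 (stand-in operation) */
array = (float*)malloc(N * sizeof(float));
check = (float*)malloc(N * sizeof(float));
for (i = 0; i < N; i++)
array[i] = check[i] = i + 1.0f;
/* parallel element-wise update */
#pragma omp parallel for
for (i = 0; i < N; i++)
array[i] = array[i] * array[i];
/* serial calculation used for the check */
for (i = 0; i < N; i++)
check[i] = check[i] * check[i];
/* output checking */
for (i = 0; i < N; i++)
if (array[i] != check[i]) {
printf("Parallel and serial results differ at index %d\n", i);
exit(1);
}
printf("Parallel and serial results match\n");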
6.
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define MAXIMUM 65536
/* Main Program */
int main()
{
int* array, i, Noofelements, cur_max, current_value;
printf("Enter the number of elements\n");
scanf("%d", &Noofelements);   /* assumed: count read from stdin */
if (Noofelements <= 0) {
printf("The array elements cannot be stored\n");
exit(1);
}
/* Dynamic Memory Allocation */
array = (int*)malloc(Noofelements * sizeof(int));
srand(MAXIMUM);
for (i = 0; i < Noofelements; i++)
array[i] = rand();
if (Noofelements == 1) {
printf("The Largest Number In The Array is %d", array[0]);
exit(1);
}
/* OpenMP Parallel For Directive And Critical Section */
cur_max = 0;
#pragma omp parallel for
for (i = 0; i < Noofelements; i = i + 1) {
if (array[i] > cur_max)
#pragma omp critical
if (array[i] > cur_max)
cur_max = array[i];
}
/* Serial Calculation */
current_value = array[0];
for (i = 1; i < Noofelements; i++)
if (array[i] > current_value)
current_value = array[i];
printf("\n");
if (current_value == cur_max)
printf("\nThe Max Value Is Same From Serial And Parallel OpenMP Directive\n");
else {
printf("\nThe Max Value Is Not Same In Serial And Parallel OpenMP Directive\n");
exit(1);
}
printf("\n");
free(array);
printf("\nThe Largest Number In The Given Array Is %d\n", cur_max);
}
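The double test inside the parallel loop above keeps most iterations out of the critical section. With OpenMP 3.1 or later the same result can be written with a max reduction; this alternative is not part of the original listing.

/* alternative to the critical section: max reduction (OpenMP 3.1+) */
cur_max = 0;
#pragma omp parallel for reduction(max:cur_max)
for (i = 0; i < Noofelements; i++)
if (array[i] > cur_max)
cur_max = array[i];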
7.
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Main Program */
int main()
{
float* array_A, sum, * checkarray, serialsum;
int arraysize, i, k, Noofthreads;
printf("Enter the array size and the number of threads\n");
scanf("%d %d", &arraysize, &Noofthreads);   /* assumed: values read from stdin */
if (arraysize <= 0) {
printf("Positive Number Required\n");
exit(1);
}
/* Dynamic Memory Allocation */
sum = 0.0;
/* Serial Calculation */
serialsum = 0.0;
for (i = 0; i < arraysize; i++)
serialsum = serialsum + array_A[i];
/* Output Checking */
if (serialsum != sum) {
printf("\nThe calculation of array sum is different \n");
exit(1);
}
else
printf("\nThe calculation of array sum is same\n");
free(checkarray);
free(array_A);
}
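The allocation and the OpenMP summation are elided in the listing above. A sketch of one way to fill them in, using the arraysize and Noofthreads values read earlier, follows; the initial values and the reduction clause are assumptions.

/* sketch of the elided sections of program 7 (assumed layout) */
array_A = (float*)malloc(arraysize * sizeof(float));
checkarray = (float*)malloc(arraysize * sizeof(float));
for (i = 0; i < arraysize; i++) {
array_A[i] = i + 1.0f;
checkarray[i] = array_A[i];
}
omp_set_num_threads(Noofthreads);
#pragma omp parallel for reduction(+:sum)
for (k = 0; k < arraysize; k++)
sum = sum + array_A[k];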
8.
#include <stdio.h>
#include <mpi.h>
using namespace std;
int main(int argc, char** argv) {
int mynode, totalnodes;
int sum, startval, endval, accum;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);
MPI_Comm_rank(MPI_COMM_WORLD, &mynode);
sum = 0;
/* each rank sums its own slice of 1..1000 */
startval = 1000 * mynode / totalnodes + 1;
endval = 1000 * (mynode + 1) / totalnodes;
for (int i = startval; i <= endval; i = i + 1)
sum = sum + i;
/* non-root ranks send their partial sum to rank 0, which accumulates them */
if (mynode != 0)
MPI_Send(&sum, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
else
for (int j = 1; j < totalnodes; j = j + 1) {
MPI_Recv(&accum, 1, MPI_INT, j, 1, MPI_COMM_WORLD, &status);
sum = sum + accum;
}
if (mynode == 0)
printf("The sum is %d\n", sum);
MPI_Finalize();
}
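The explicit send/receive accumulation above can also be expressed with a single collective call; a sketch using MPI_Reduce (not part of the original listing) is:

/* alternative: let MPI combine the partial sums at rank 0 */
int total = 0;
MPI_Reduce(&sum, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (mynode == 0)
printf("The sum is %d\n", total);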
9.
#include <iostream>
#include "mpi.h"
#include <math.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int p, i, Iam, root;
int counts[4] = { 1, 2, 3, 4 };
int displs[4] = { 0, 1, 3, 6 };
char x[10], y[10], a, alphabet;
/*----------------*/
/* initialize MPI */
/*----------------*/
MPI_Init(&argc, &argv);
/*---------------------------*/
/* get the process ID number */
/*---------------------------*/
MPI_Comm_rank(MPI_COMM_WORLD, &Iam);
/*-----------------------------------*/
/* get the size of the process group */
/*-----------------------------------*/
MPI_Comm_size(MPI_COMM_WORLD, &p);
root = 1;
if (Iam == 0) {
printf(" Function Proc Sendbuf Recvbuf\n");
printf(" -------- ---- ------- -------\n");
}
MPI_Barrier(MPI_COMM_WORLD);
alphabet = 'a';
/*-----------------------------------*/
/* MPI_Gather() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Gatherv() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Allgather() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Allgatherv() */
/*-----------------------------------*/
/*-----------------------------------*/
/* MPI_Scatter() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Alltoall() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Reduce() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Allreduce() */
/*-----------------------------------*/
MPI_Barrier(MPI_COMM_WORLD);
/*-----------------------------------*/
/* MPI_Bcast() */
/*-----------------------------------*/
a = ' ';
for (i = 0; i < p; i++) {
x[i] = ' ';
y[i] = ' ';
}
if (Iam == root) {
a = 'b';
x[0] = a;
}
MPI_Bcast(&a, 1, MPI_CHAR, /* buf,count,type */
root, MPI_COMM_WORLD); /* root,comm */
MPI_Barrier(MPI_COMM_WORLD);
/*--------------*/
/* Finalize MPI */
/*--------------*/
MPI_Finalize();
}
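Most of the collective calls in the listing above are left as placeholder comments. As an illustration, a sketch of how the MPI_Gather and MPI_Scatter sections are typically filled in is given below, reusing the a, x, y, alphabet, root, Iam and p names from the program; the exact buffers and the printed format are assumptions.

/* sketch: gather one character from every rank to the root (assumed buffers) */
a = (char)(alphabet + Iam);                 /* each rank contributes a different letter */
MPI_Gather(&a, 1, MPI_CHAR,                 /* sendbuf, sendcount, sendtype */
y, 1, MPI_CHAR,                             /* recvbuf, recvcount per rank, recvtype */
root, MPI_COMM_WORLD);                      /* root, comm */
if (Iam == root)
printf(" MPI_Gather %d %c %.*s\n", Iam, a, p, y);
/* sketch: scatter one character from the root back to every rank */
if (Iam == root)
for (i = 0; i < p; i++)
x[i] = (char)(alphabet + i);                /* root prepares one letter per rank */
MPI_Scatter(x, 1, MPI_CHAR, &a, 1, MPI_CHAR, root, MPI_COMM_WORLD);
printf(" MPI_Scatter %d %c\n", Iam, a);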
10.
#include <iostream>
#include "mpi.h"
#include <stdio.h>
#define SIZE 16
#define UP 0
#define DOWN 1
#define LEFT 2
#define RIGHT 3
int main(int argc, char* argv[])
{
int numtasks, rank, source, dest, outbuf, i, tag = 1;
int inbuf[4] = { MPI_PROC_NULL, MPI_PROC_NULL, MPI_PROC_NULL, MPI_PROC_NULL };
int nbrs[4], dims[2] = { 4, 4 }, periods[2] = { 0, 0 }, reorder = 0, coords[2];
/* note: dims, periods, reorder, tag and inbuf are assumed values used by the elided sections */
MPI_Request reqs[8];
MPI_Status stats[8];
MPI_Comm cartcomm;
/*----------------*/
/* Initialize MPI */
/*----------------*/
MPI_Init(&argc, &argv);
/*-------------------------------------------------------*/
/* Get the size of the MPI_COMM_WORLD communicator group */
/*-------------------------------------------------------*/
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks == SIZE) {
/*---------------------------------------------------------------------*/
/* Make a new communicator to which 2-D Cartesian topology is attached */
/*---------------------------------------------------------------------*/
/*------------------------------------------*/
/* Get my rank in the cartcomm communicator */
/*------------------------------------------*/
MPI_Comm_rank(cartcomm, &rank);
/*--------------------------------------------------------------------*/
/* Determine process coords in cartesian topology given rank in group */
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
/* Obtain the shifted source and destination ranks in both directions */
/*--------------------------------------------------------------------*/
MPI_Cart_shift(cartcomm, 0, 1, &nbrs[UP], &nbrs[DOWN]);
MPI_Cart_shift(cartcomm, 1, 1, &nbrs[LEFT], &nbrs[RIGHT]);
outbuf = rank;
/*----------------------------------------------*/
/* send messages to the four adjacent processes */
/*----------------------------------------------*/
/*---------------------------------------------------*/
/* receive messages from the four adjacent processes */
/*---------------------------------------------------*/
/*------------------------------------------------*/
/* Wait for all 8 communication tasks to complete */
/*------------------------------------------------*/
/*--------------*/
/* Finalize MPI */
/*--------------*/
}
else
printf("This program requires %d MPI tasks. Terminating.\n", SIZE);
MPI_Finalize();
return 0;
}
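The topology creation and the non-blocking neighbour exchange are elided in the listing above. A sketch of what those sections typically contain, using the names declared in the program, is given below; the 4x4 dims values and the loop over the four neighbours are assumptions.

/* sketch: attach a 2-D Cartesian topology (assumed 4x4, non-periodic) */
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &cartcomm);
MPI_Comm_rank(cartcomm, &rank);
MPI_Cart_coords(cartcomm, rank, 2, coords);
/* ...MPI_Cart_shift calls as in the listing... */
/* post a non-blocking send and receive for each of the four neighbours */
for (i = 0; i < 4; i++) {
dest = nbrs[i];
source = nbrs[i];
MPI_Isend(&outbuf, 1, MPI_INT, dest, tag, cartcomm, &reqs[i]);
MPI_Irecv(&inbuf[i], 1, MPI_INT, source, tag, cartcomm, &reqs[i + 4]);
}
/* wait for all 8 non-blocking operations to complete */
MPI_Waitall(8, reqs, stats);
printf("rank= %d coords= %d %d neighbours(u,d,l,r)= %d %d %d %d\n",
rank, coords[0], coords[1], nbrs[UP], nbrs[DOWN], nbrs[LEFT], nbrs[RIGHT]);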
11.
#include <iostream>
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define MASTER 0
int main(int argc, char* argv[])
{
int numtasks, taskid, len;
char hostname[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (taskid == MASTER)
printf("MASTER: Number of MPI tasks is: %d\n", numtasks);
MPI_Get_processor_name(hostname, &len);
printf("Hello from task %d on %s!\n", taskid, hostname);
MPI_Finalize();
return 0;
}
12.
#include <iostream>
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define MASTER 0
int main(int argc, char* argv[])
{
int numtasks, taskid, len;
char hostname[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Get_processor_name(hostname, &len);
printf("Hello from task %d on %s!\n", taskid, hostname);
if (taskid == MASTER)
printf("MASTER: Number of MPI tasks is: %d\n", numtasks);
MPI_Finalize();
return 0;
}