0% found this document useful (0 votes)
5 views5 pages

Lab 7

Uploaded by

Kuladeep P
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
5 views5 pages

Lab 7

Uploaded by

Kuladeep P
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 5

High Performance Computing - Lab7:

Name: Raja Vignesh Selvamani


Student ID: 202103607

Question 1: Complete the above 4 examples with the required output


Code : data-type 1)
#include "mpi.h"
#include <stdio.h>
#define SIZE 4

/*
 * Data-type example 1: MPI_Type_contiguous.
 *
 * Task 0 owns a SIZE x SIZE float matrix and sends row i (described as
 * one element of the derived type "rowtype" = SIZE contiguous floats)
 * to task i.  Every task, including task 0, receives its row into b
 * and prints it.  Requires exactly SIZE MPI processes.
 */
int main(int argc, char *argv[]) {

    /* NOTE(review): unused variable 'dest' removed. */
    int numtasks, rank, source=0, tag=1, i;
    float a[SIZE][SIZE] =
    {1.0, 2.0, 3.0, 4.0,
    5.0, 6.0, 7.0, 8.0,
    9.0, 10.0, 11.0, 12.0,
    13.0, 14.0, 15.0, 16.0};
    float b[SIZE];
    MPI_Status stat;
    MPI_Datatype rowtype; /* derived type describing one matrix row */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    /* SIZE consecutive MPI_FLOATs -> one row of 'a'. */
    MPI_Type_contiguous(SIZE, MPI_FLOAT, &rowtype);
    MPI_Type_commit(&rowtype);

    if (numtasks == SIZE) {
        /* Task 0 sends one rowtype element (row i) to each task i. */
        if (rank == 0) {
            for (i = 0; i < numtasks; i++) {
                MPI_Send(&a[i][0], 1, rowtype, i, tag, MPI_COMM_WORLD);
            }
        }

        /* All tasks receive their row as SIZE plain floats; the
         * contiguous layout on the wire matches either description. */
        MPI_Recv(b, SIZE, MPI_FLOAT, source, tag, MPI_COMM_WORLD, &stat);
        printf("rank= %d b= %3.1f %3.1f %3.1f %3.1f\n",
        rank, b[0], b[1], b[2], b[3]);
    }
    else
        printf("Must specify %d processors. Terminating.\n", SIZE);

    /* Free the derived datatype when done using it. */
    MPI_Type_free(&rowtype);
    MPI_Finalize();
    return 0;
}
Code: Data-type 2)
#include "mpi.h"
#include <stdio.h>
#define SIZE 4

/*
 * Data-type example 2: MPI_Type_vector.
 *
 * Task 0 owns a SIZE x SIZE float matrix and sends column i (described
 * as one element of "columntype": SIZE blocks of 1 float, stride SIZE)
 * to task i.  Every task receives its column into b as SIZE contiguous
 * floats and prints it.  Requires exactly SIZE MPI processes.
 */
int main(int argc, char *argv[]) {

    /* NOTE(review): unused variable 'dest' removed. */
    int numtasks, rank, source=0, tag=1, i;
    float a[SIZE][SIZE] =
    {1.0, 2.0, 3.0, 4.0,
    5.0, 6.0, 7.0, 8.0,
    9.0, 10.0, 11.0, 12.0,
    13.0, 14.0, 15.0, 16.0};
    float b[SIZE];

    MPI_Status stat;
    MPI_Datatype columntype; /* derived type describing one matrix column */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    /* SIZE blocks of 1 float, a stride of SIZE floats between block
     * starts -> one column of the row-major matrix 'a'. */
    MPI_Type_vector(SIZE, 1, SIZE, MPI_FLOAT, &columntype);
    MPI_Type_commit(&columntype);

    if (numtasks == SIZE) {
        /* Task 0 sends one columntype element (column i) to task i. */
        if (rank == 0) {
            for (i = 0; i < numtasks; i++)
                MPI_Send(&a[0][i], 1, columntype, i, tag, MPI_COMM_WORLD);
        }

        /* On the wire the strided column is packed into SIZE floats,
         * so the receiver can use a plain contiguous description. */
        MPI_Recv(b, SIZE, MPI_FLOAT, source, tag, MPI_COMM_WORLD, &stat);
        printf("rank= %d b= %3.1f %3.1f %3.1f %3.1f\n",
        rank, b[0], b[1], b[2], b[3]);
    }
    else
        printf("Must specify %d processors. Terminating.\n", SIZE);

    /* Free the derived datatype when done using it. */
    MPI_Type_free(&columntype);
    MPI_Finalize();
    return 0;
}

Code: Data-type 3)
#include "mpi.h"
#include <stdio.h>
#define NELEMENTS 6
/*
 * Data-type example 3: MPI_Type_indexed.
 *
 * Task 0 owns a flat array of 16 floats and sends the same NELEMENTS
 * (= 4 + 2) values to every task, picked out by "indextype": a block
 * of 4 floats starting at offset 5 (a[5..8]) and a block of 2 floats
 * starting at offset 12 (a[12..13]).  Every task receives them packed
 * into b and prints them.
 */
int main(int argc, char *argv[]) {
    /* NOTE(review): unused variable 'dest' removed. */
    int numtasks, rank, source=0, tag=1, i;
    int blocklengths[2], displacements[2];
    float a[16] =
    {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
    9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
    float b[NELEMENTS];

    MPI_Status stat;
    MPI_Datatype indextype; /* derived type: two disjoint blocks of 'a' */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    /* Block 0: 4 floats at element offset 5; block 1: 2 floats at
     * element offset 12.  Displacements are in units of MPI_FLOAT. */
    blocklengths[0] = 4;
    blocklengths[1] = 2;
    displacements[0] = 5;
    displacements[1] = 12;

    /* Create the indexed derived datatype from the two blocks. */
    MPI_Type_indexed(2, blocklengths, displacements, MPI_FLOAT, &indextype);
    MPI_Type_commit(&indextype);

    if (rank == 0) {
        /* Task 0 sends one indextype element, rooted at &a[0], to
         * every task (including itself). */
        for (i = 0; i < numtasks; i++)
            MPI_Send(&a[0], 1, indextype, i, tag, MPI_COMM_WORLD);
    }

    /* The 6 selected values arrive packed; receive them contiguously. */
    MPI_Recv(b, NELEMENTS, MPI_FLOAT, source, tag, MPI_COMM_WORLD, &stat);
    printf("rank= %d b= %3.1f %3.1f %3.1f %3.1f %3.1f %3.1f\n",
    rank, b[0], b[1], b[2], b[3], b[4], b[5]);

    /* Free the derived datatype when done using it. */
    MPI_Type_free(&indextype);
    MPI_Finalize();
    return 0;
}

Question 2) Can you think about how to use the above derived data types to represent
the main diagonal elements (1.0, 6.0, 11.0, 16.0) of the above 4x4 matrix and
distribute them to all processes?

#include "mpi.h"
#include <stdio.h>
#define SIZE 4

/*
 * Question 2: distribute the main diagonal (1.0, 6.0, 11.0, 16.0) of a
 * SIZE x SIZE matrix to all processes with ONE derived datatype.
 *
 * The diagonal of a row-major SIZE x SIZE matrix is a strided pattern:
 * SIZE blocks of 1 float, with a stride of SIZE+1 floats between block
 * starts (a[0][0], a[1][1], ...).  MPI_Type_vector describes exactly
 * that, so a single send of one "diagtype" element rooted at &a[0][0]
 * ships the whole diagonal, which each receiver unpacks as SIZE
 * contiguous floats.
 *
 * Fixes relative to the original attempt:
 *  - The original sent `diagonal + i` described as 4-element rowtype /
 *    strided columntype, reading far past the end of the 4-float
 *    buffer for i > 0 (out-of-bounds / undefined behavior).
 *  - Rank 0 sent FOUR messages per destination with the same tag while
 *    each rank posted only ONE receive, leaving three unmatched sends
 *    per rank (potential hang).
 *  - MPI_Type_extent / MPI_Type_struct are deprecated (removed in
 *    MPI-3); the extent was also multiplied by 4 a second time, giving
 *    a wrong offset.  None of that machinery is needed here.
 */
int main(int argc, char *argv[]) {

    int numtasks, rank, source=0, tag=1, i;
    float a[SIZE][SIZE] =
    {1.0, 2.0, 3.0, 4.0,
    5.0, 6.0, 7.0, 8.0,
    9.0, 10.0, 11.0, 12.0,
    13.0, 14.0, 15.0, 16.0};
    float diagonal[SIZE];

    MPI_Status stat;
    MPI_Datatype diagtype; /* strided view of the main diagonal of 'a' */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    /* SIZE blocks of 1 float, stride SIZE+1 -> a[0][0], a[1][1], ...
     * i.e. the main diagonal of the row-major matrix. */
    MPI_Type_vector(SIZE, 1, SIZE + 1, MPI_FLOAT, &diagtype);
    MPI_Type_commit(&diagtype);

    if (rank == 0) {
        /* One message per task: a single diagtype element rooted at
         * the top-left corner of the matrix carries the diagonal. */
        for (i = 0; i < numtasks; i++)
            MPI_Send(&a[0][0], 1, diagtype, i, tag, MPI_COMM_WORLD);
    }

    /* The strided diagonal arrives packed as SIZE contiguous floats. */
    MPI_Recv(diagonal, SIZE, MPI_FLOAT, source, tag, MPI_COMM_WORLD, &stat);

    /* Print the received diagonal elements. */
    printf("rank= %d diagonal= %3.1f %3.1f %3.1f %3.1f\n",
    rank, diagonal[0], diagonal[1], diagonal[2], diagonal[3]);

    /* Free the derived datatype when done using it. */
    MPI_Type_free(&diagtype);

    MPI_Finalize();
    return 0;
}

Explanation: The above code describes the main diagonal of the 4x4 matrix with a
derived MPI datatype and distributes it from task 0 to all processes.

You might also like