Assignments for parallel processing using OpenMPI.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
openmpi-assignments/assignment2.c

107 lines
3.4 KiB

#include <stdio.h>
#include <stdlib.h>

#include "common.h"
#define INPUTFILE "assignment2.in"
/*
 * Transposes the matrix read from INPUTFILE in parallel: every rank
 * computes the transposed positions for its assigned slice of indices,
 * rank 0 gathers all slices and prints the transposed matrix.
 */
int main(int argc, char *argv[]) {
    int numProcesses, numRank;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &numRank);

    /* Initialize to NULL/0: only rank 0 allocates these, but every rank
     * passes the pointers to MPI_Gatherv and &numCols to MPI_Bcast below;
     * reading indeterminate values on non-root ranks is undefined behavior. */
    int *arrMatrixOriginal = NULL, *arrIndicesTransp = NULL;
    int *arrBlockSizes = NULL, *arrBlockIndices = NULL;
    int numCols = 0, numCount = 0;

    if (numRank == 0) {
        arrMatrixOriginal = read_matrix_file(INPUTFILE, &numCols, &numCount);
        check_parallel_worth(numProcesses, numCount, 2);
        arrIndicesTransp = my_malloc(numCount * sizeof(int));
        /* NOTE(review): spread_evenly presumably also sends each rank its
         * size/index packet (matching the MPI_Recv below, tag 1) — confirm
         * against its definition in common.h. */
        spread_evenly(numProcesses, numCount, 1, &arrBlockIndices, &arrBlockSizes);
    }
    MPI_Bcast(&numCount, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&numCols, 1, MPI_INT, 0, MPI_COMM_WORLD);

    /* Each rank receives its block size and starting index from rank 0. */
    int arrBlockData[NUM_CODES];
    MPI_Recv(arrBlockData, NUM_CODES, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);

    /* numRows * numCols == numCount for a well-formed input matrix. */
    int numRows = numCount / numCols;

    /* Only this rank's slice is needed, not the full numCount array. */
    int *arrIndicesSub = my_malloc(arrBlockData[CODE_SIZE] * sizeof(int));
    for (int i = 0; i < arrBlockData[CODE_SIZE]; i++) {
        /* Only the indices are transposed in parallel; the matrix data
         * itself stays on rank 0 and is permuted at print time. */
        arrIndicesSub[i] = transpose_index(i + arrBlockData[CODE_INDEX], numRows, numCols);
    }

    /* Gather every rank's slice of transposed indices back on rank 0. */
    MPI_Gatherv(arrIndicesSub, arrBlockData[CODE_SIZE], MPI_INT, arrIndicesTransp,
                arrBlockSizes, arrBlockIndices, MPI_INT, 0, MPI_COMM_WORLD);

    if (numRank == 0) {
        /* The transposed matrix has numCols rows of numRows elements each,
         * hence a newline every numRows printed values. */
        printf("transposed matrix:");
        for (int i = 0; i < numCount; i++) {
            if (i % numRows == 0)
                printf("\n");
            printf("%d ", arrMatrixOriginal[arrIndicesTransp[i]]);
        }
        printf("\n");
        free(arrMatrixOriginal);
        free(arrIndicesTransp);
        free(arrBlockSizes);
        free(arrBlockIndices);
    }
    free(arrIndicesSub);

    MPI_Finalize();
    return 0;
}
//int numBlockIndex = 0;
//int numRemainder = numCount % numProcesses;
//arrBlockIndices = my_malloc(numProcesses * sizeof(int));
//arrBlockSizes = my_malloc(numProcesses * sizeof(int));
//for (int i = 0; i < numProcesses; i++) {
//arrBlockSizes[i] = numCount / numProcesses;
//if (numRemainder > 0) {
//arrBlockSizes[i]++;
//numRemainder--;
//}
//arrBlockIndices[i] = numBlockIndex;
//numBlockIndex += arrBlockSizes[i];
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//int arrBlockData[NUM_CODES];
//arrBlockData[CODE_SIZE] = arrBlockSizes[i];
//arrBlockData[CODE_INDEX] = arrBlockIndices[i];
//// Send every process starting index of 1D represented matrix and number of
//// succeding indices to calculate transposition
//MPI_Send(arrBlockData, NUM_CODES, MPI_INT, i, 1, MPI_COMM_WORLD);
//}
//for (int i = 0; i < numProcesses; i++) {
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//}