refactored assignment2 for spread_evenly

master
Peter Babič 9 years ago
parent 535a1622f3
commit 7bcb332ddd
  1. assignment1.c (38 changed lines)
  2. assignment2.c (74 changed lines)
  3. common.c (30 changed lines)
  4. common.h (8 changed lines)
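The gist of the change: the block-distribution loop that assignment1.c and assignment2.c each carried on rank 0 is consolidated into spread_evenly() in common.c, which now also sends every rank a two-int {CODE_SIZE, CODE_INDEX} packet instead of taking a per-iteration callback. A minimal sketch of the resulting call pattern, using identifiers from the diff (it assumes MPI_Init has already run and status is a declared MPI_Status):

    /* Rank 0: spread_evenly() allocates the size/offset arrays and
     * MPI_Sends a NUM_CODES-int {CODE_SIZE, CODE_INDEX} packet to each rank. */
    int *arrBlockIndices = NULL, *arrBlockSizes = NULL;
    if (numRank == 0)
        spread_evenly(numProcesses, numCount, &arrBlockIndices, &arrBlockSizes);

    /* Every rank, including 0: receive its packet (tag 1 matches the sender). */
    int arrBlockData[NUM_CODES];
    MPI_Recv(arrBlockData, NUM_CODES, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);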

@@ -3,7 +3,6 @@
#define INPUTFILE "assignment1.in"
int maximum(int n, int *arrNumbers);
void send_block_size(const void *blockSize, int toProcess);
int main(int argc, char *argv[]) {
int numProcesses, numRank;
@@ -38,43 +37,21 @@ int main(int argc, char *argv[]) {
checkParallelWorth(numProcesses, sizeBufferUsed);
spread_evenly(numProcesses, sizeBufferUsed, &arrBlockIndices, &arrBlockSizes, &send_block_size);
spread_evenly(numProcesses, sizeBufferUsed, &arrBlockIndices, &arrBlockSizes);
//int numBlockIndex = 0;
//int numRemainder = sizeBufferUsed % numProcesses;
//arrBlockIndices = my_malloc(numProcesses * sizeof(int));
//arrBlockSizes = my_malloc(numProcesses * sizeof(int));
//for (int i = 0; i < numProcesses; i++) {
//arrBlockSizes[i] = sizeBufferUsed / numProcesses;
//if (numRemainder > 0) {
//arrBlockSizes[i]++;
//numRemainder--;
//}
//arrBlockIndices[i] = numBlockIndex;
//numBlockIndex += arrBlockSizes[i];
//// Send block size to every process
//MPI_Send(&arrBlockSizes[i], 1, MPI_INT, i, 1, MPI_COMM_WORLD);
//}
//for (int i = 0; i < numProcesses; i++) {
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//}
}
MPI_Bcast(&sizeBufferTotal, 1, MPI_INT, 0, MPI_COMM_WORLD);
int *arrSlice = my_malloc(sizeBufferTotal * sizeof(int));
MPI_Scatterv(arrNumbers, arrBlockSizes, arrBlockIndices, MPI_INT, arrSlice, sizeBufferTotal, MPI_INT, 0, MPI_COMM_WORLD);
int numBlockSize;
MPI_Recv(&numBlockSize, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
//for (int i = 0; i < numBlockSize; i++) {
int arrBlockData[NUM_CODES];
MPI_Recv(arrBlockData, NUM_CODES, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
//for (int i = 0; i < arrBlockData[CODE_SIZE]; i++) {
//printf("rank %d: m[%d] = %d\n", numRank, i, arrSlice[i]);
//}
int numMaximum = maximum(numBlockSize, arrSlice);
int numMaximum = maximum(arrBlockData[CODE_SIZE], arrSlice);
//printf("max in %d is %d\n", numRank, numMaximum);
int *arrMaximums;
if (numRank == 0) {
@@ -106,8 +83,5 @@ int maximum(int n, int *arrNumbers) {
return value;
}
//void send_block_size(int blockSize, int toProcess) {
void send_block_size(const void *blockSize, int toProcess) {
MPI_Send(blockSize, 1, MPI_INT, toProcess, 1, MPI_COMM_WORLD);
}
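The slice maximum is now taken over arrBlockData[CODE_SIZE] elements instead of a separately received numBlockSize. The body of maximum() is only partially visible in this hunk; a straightforward version consistent with the declaration int maximum(int n, int *arrNumbers) would be:

    int maximum(int n, int *arrNumbers) {
        int value = arrNumbers[0];            /* assumes n >= 1 */
        for (int i = 1; i < n; i++) {
            if (arrNumbers[i] > value)
                value = arrNumbers[i];
        }
        return value;
    }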

@@ -1,11 +1,7 @@
#include "common.h"
#define INPUTFILE "assignment2.in"
#define INPUTFILE "assignment3.in1"
// TODO: define custom structure for this probably
#define CODE_SIZE 0
#define CODE_INDEX 1
#define NUM_CODES 2
int transpose_index(int index, int rows, int cols);
@@ -28,33 +24,9 @@ int main(int argc, char *argv[]) {
arrIndicesTransp = my_malloc(numCount * sizeof(int));
int numBlockIndex = 0;
int numRemainder = numCount % numProcesses;
spread_evenly(numProcesses, numCount, &arrBlockIndices, &arrBlockSizes);
arrBlockIndices = my_malloc(numProcesses * sizeof(int));
arrBlockSizes = my_malloc(numProcesses * sizeof(int));
for (int i = 0; i < numProcesses; i++) {
arrBlockSizes[i] = numCount / numProcesses;
if (numRemainder > 0) {
arrBlockSizes[i]++;
numRemainder--;
}
arrBlockIndices[i] = numBlockIndex;
numBlockIndex += arrBlockSizes[i];
int arrTransp[NUM_CODES];
arrTransp[CODE_SIZE] = arrBlockSizes[i];
arrTransp[CODE_INDEX] = arrBlockIndices[i];
// Send every process the starting index of the 1D-represented matrix and the number of
// succeeding indices to calculate the transposition
MPI_Send(arrTransp, NUM_CODES, MPI_INT, i, NUM_CODES, MPI_COMM_WORLD);
}
//for (int i = 0; i < numProcesses; i++) {
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//}
}
@@ -62,21 +34,21 @@ int main(int argc, char *argv[]) {
MPI_Bcast(&numCols, 1, MPI_INT, 0, MPI_COMM_WORLD);
//printf("size %2d, index %2d, buffer %2d, rank %d\n", packet[0], packet[1], packet[CODE_COUNT], numRank);
int arrTransp[NUM_CODES];
MPI_Recv(arrTransp, NUM_CODES, MPI_INT, 0, NUM_CODES, MPI_COMM_WORLD, &status);
int arrBlockData[NUM_CODES];
MPI_Recv(arrBlockData, NUM_CODES, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
//We can calculate numRows * numCols = numCount
int numRows = numCount / numCols;
int *arrIndicesSub = my_malloc(numCount * sizeof(int));
//for (int i = arrTransp[CODE_INDEX]; i < (arrTransp[CODE_SIZE] + arrTransp[CODE_INDEX]); i++) {
for (int i = 0; i < arrTransp[CODE_SIZE]; i++) {
//for (int i = arrBlockData[CODE_INDEX]; i < (arrBlockData[CODE_SIZE] + arrBlockData[CODE_INDEX]); i++) {
for (int i = 0; i < arrBlockData[CODE_SIZE]; i++) {
// Only the indices are transposed in parallel
int index = transpose_index(i + arrTransp[CODE_INDEX], numRows, numCols);
int index = transpose_index(i + arrBlockData[CODE_INDEX], numRows, numCols);
arrIndicesSub[i] = index;
}
//if (numRank == 0)
MPI_Gatherv(arrIndicesSub, arrTransp[CODE_SIZE], MPI_INT, arrIndicesTransp, arrBlockSizes, arrBlockIndices,
MPI_Gatherv(arrIndicesSub, arrBlockData[CODE_SIZE], MPI_INT, arrIndicesTransp, arrBlockSizes, arrBlockIndices,
MPI_INT, 0, MPI_COMM_WORLD);
//if (numRank == 0 ) {
@@ -109,3 +81,33 @@ int transpose_index(int index, int rows, int cols) {
return (index / rows) + cols * (index % rows);
}
//int numBlockIndex = 0;
//int numRemainder = numCount % numProcesses;
//arrBlockIndices = my_malloc(numProcesses * sizeof(int));
//arrBlockSizes = my_malloc(numProcesses * sizeof(int));
//for (int i = 0; i < numProcesses; i++) {
//arrBlockSizes[i] = numCount / numProcesses;
//if (numRemainder > 0) {
//arrBlockSizes[i]++;
//numRemainder--;
//}
//arrBlockIndices[i] = numBlockIndex;
//numBlockIndex += arrBlockSizes[i];
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//int arrBlockData[NUM_CODES];
//arrBlockData[CODE_SIZE] = arrBlockSizes[i];
//arrBlockData[CODE_INDEX] = arrBlockIndices[i];
//// Send every process the starting index of the 1D-represented matrix and the number of
//// succeeding indices to calculate the transposition
//MPI_Send(arrBlockData, NUM_CODES, MPI_INT, i, 1, MPI_COMM_WORLD);
//}
//for (int i = 0; i < numProcesses; i++) {
//printf("size[%d] = %d\tindex[%d] = %d\n", i, arrBlockSizes[i], i, arrBlockIndices[i]);
//}
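transpose_index() can be read as a gather map: position index of the transposed, row-major result takes its value from the returned offset in the original rows x cols array. A quick standalone check of that reading (not part of the commit), using a 2x3 matrix:

    #include <stdio.h>

    /* Same formula as in assignment2.c */
    int transpose_index(int index, int rows, int cols) {
        return (index / rows) + cols * (index % rows);
    }

    int main(void) {
        int rows = 2, cols = 3;                  /* original matrix, row-major */
        int src[] = {10, 11, 12, 20, 21, 22};    /* [ 10 11 12 ; 20 21 22 ] */
        /* Transposed result is 3x2; position j pulls src[transpose_index(j, rows, cols)] */
        for (int j = 0; j < rows * cols; j++)
            printf("%d ", src[transpose_index(j, rows, cols)]);
        printf("\n");                            /* prints: 10 20 11 21 12 22 */
        return 0;
    }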

@@ -38,17 +38,17 @@ FILE *my_fopen(const char *filename, const char *mode) {
return file;
}
void *buffer_grow(void *ptr, int sizeBufferUsed, int *sizeBufferTotal) {
void *buffer_grow(void *ptr, int numCount, int *sizeBufferTotal) {
// Grow the buffer exponentially when needed
if (sizeBufferUsed == *sizeBufferTotal) {
if (numCount == *sizeBufferTotal) {
*sizeBufferTotal *= GROWTH_FACTOR;
ptr = my_realloc(ptr, *sizeBufferTotal * sizeof(int));
}
return ptr;
}
bool checkParallelWorth(int numProcesses, int sizeBufferUsed) {
if (numProcesses > sizeBufferUsed / 2) {
bool checkParallelWorth(int numProcesses, int numCount) {
if (numProcesses > numCount / 2) {
//printf("*********************************************************************************\n");
printf("The number of processes if greater than number of parallel computations required!\n");
//printf("*********************************************************************************\n\n");
@@ -57,27 +57,35 @@ bool checkParallelWorth(int numProcesses, int sizeBufferUsed) {
return true;
}
void spread_evenly(int numProcesses, int sizeBufferUsed,
int **arrBlockIndices, int **arrBlockSizes, void (*ptrFciLoopEnd)(const void*, int)) {
void spread_evenly(int numProcesses, int numCount, int **arrBlockIndices, int **arrBlockSizes) {
int numBlockIndex = 0;
int numRemainder = sizeBufferUsed % numProcesses;
int numRemainder = numCount % numProcesses;
*arrBlockIndices = my_malloc(numProcesses * sizeof(int));
*arrBlockSizes = my_malloc(numProcesses * sizeof(int));
*arrBlockIndices = my_malloc(numProcesses * sizeof(int));
for (int i = 0; i < numProcesses; i++) {
int numBlockSize = sizeBufferUsed / numProcesses;
int numBlockSize = numCount / numProcesses;
(*arrBlockSizes)[i] = numBlockSize;
if (numRemainder > 0) {
(*arrBlockSizes)[i] = ++numBlockSize;
numRemainder--;
}
int arrBlockData[NUM_CODES];
arrBlockData[CODE_SIZE] = numBlockSize;
arrBlockData[CODE_INDEX] = numBlockIndex;
(*arrBlockIndices)[i] = numBlockIndex;
numBlockIndex += numBlockSize;
//printf("size[%d] = %d\tindex[%d] = %d\n", i, (*arrBlockSizes)[i], i, (*arrBlockIndices)[i]);
//printf("size[%d] = %d\tindex[%d] = %d\n", i, numBlockSize, i, numBlockIndex);
// Send every process the starting index of the 1D-represented matrix and the number of
// succeeding indices to calculate the transposition
MPI_Send(arrBlockData, NUM_CODES, MPI_INT, i, 1, MPI_COMM_WORLD);
(*ptrFciLoopEnd)(&numBlockSize, i);
//(*ptrFciLoopEnd)(&numBlockSize, i);
//MPI_Send(&arrBlockSizes[i], 1, MPI_INT, i, 1, MPI_COMM_WORLD);
}
}
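The block arithmetic inside spread_evenly() is easy to verify serially; for example, 10 elements over 3 processes should give sizes 4, 3, 3 at offsets 0, 4, 7. A sketch with the per-rank MPI_Send of the packet left out so it runs without MPI:

    #include <stdio.h>

    int main(void) {
        int numProcesses = 3, numCount = 10;
        int numBlockIndex = 0;
        int numRemainder = numCount % numProcesses;     /* 10 % 3 = 1 */
        for (int i = 0; i < numProcesses; i++) {
            int numBlockSize = numCount / numProcesses; /* 3 */
            if (numRemainder > 0) {
                numBlockSize++;                         /* first blocks absorb the remainder */
                numRemainder--;
            }
            printf("size[%d] = %d\tindex[%d] = %d\n", i, numBlockSize, i, numBlockIndex);
            numBlockIndex += numBlockSize;
        }
        /* prints sizes 4, 3, 3 and indices 0, 4, 7 -- the blocks cover all 10 elements */
        return 0;
    }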

@@ -14,14 +14,18 @@
// Maximum length of the file that will be read (not applicable to a1)
#define MAX_LINE_LENGTH 8192
// TODO: define custom structure for this probably
#define CODE_SIZE 0
#define CODE_INDEX 1
#define NUM_CODES 2
void *my_malloc(size_t size);
void *my_calloc(size_t nitems, size_t size);
void *my_realloc(void *ptr, size_t size);
FILE *my_fopen(const char *filename, const char *mode);
void *buffer_grow(void *ptr, int sizeBufferUsed, int *sizeBufferTotal);
bool checkParallelWorth(int numProcesses, int sizeBufferUsed);
void spread_evenly(int numProcesses, int sizeBufferUsed,
int **arrBlockIndices, int **arrBlockSizes, void (*ptrFciLoopEnd)(const void*, int));
void spread_evenly(int numProcesses, int sizeBufferUsed, int **arrBlockIndices, int **arrBlockSizes);
void *read_matrix_file(const char *filename, int *numCols, int *numCount);
#endif
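The TODO about a custom structure for the CODE_SIZE/CODE_INDEX pair is left open by this commit. One possible shape (purely illustrative, not in the diff) keeps the two ints contiguous, so the existing transfers of NUM_CODES x MPI_INT could stay as they are; a dedicated MPI datatype would be the stricter choice:

    /* Hypothetical replacement for the CODE_SIZE / CODE_INDEX defines. */
    typedef struct {
        int size;   /* elements in this rank's block  (was arrBlockData[CODE_SIZE])  */
        int index;  /* starting offset of the block   (was arrBlockData[CODE_INDEX]) */
    } BlockData;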
