master
Peter Babič 9 years ago
parent 078cedbc0e
commit 3c8a2664fb
  1. BIN
      atter
  2. 46
      atter.c
  3. BIN
      atterv
  4. 69
      atterv.c
  5. 4
      library.h
  6. 4
      run
  7. BIN
      zadanie1
  8. 249
      zadanie1.c
  9. 24
      zadanie1.in
  10. BIN
      zadanie11
  11. 188
      zadanie11.c

BIN
atter

Binary file not shown.

@ -0,0 +1,46 @@
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * MPI_Scatter / MPI_Gather demo: rank 0 builds an array of odd numbers
 * (one int per process), scatters one value to each rank, every rank
 * doubles its value, and rank 0 gathers and prints the results.
 */
int main(int argc, char **argv) {
    int size, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int *globaldata = NULL; /* only allocated / meaningful on the root rank */
    int localdata;

    if (rank == 0) {
        globaldata = malloc(size * sizeof *globaldata);
        if (globaldata == NULL) { /* BUG FIX: allocation was never checked */
            fprintf(stderr, "Memory allocation unsuccessful.\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        for (int i = 0; i < size; i++)
            globaldata[i] = 2 * i + 1;

        printf("Processor %d has data: ", rank);
        for (int i = 0; i < size; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }

    /* Non-root ranks pass NULL as the send buffer; MPI ignores it there. */
    MPI_Scatter(globaldata, 1, MPI_INT, &localdata, 1, MPI_INT, 0, MPI_COMM_WORLD);

    printf("Processor %d has data %d\n", rank, localdata);
    localdata *= 2;
    printf("Processor %d doubling the data, now has %d\n", rank, localdata);

    MPI_Gather(&localdata, 1, MPI_INT, globaldata, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("Processor %d has data: ", rank);
        for (int i = 0; i < size; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }

    free(globaldata); /* free(NULL) is a no-op, so no rank check needed */
    MPI_Finalize();
    return 0;
}

BIN
atterv

Binary file not shown.

@ -0,0 +1,69 @@
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
//#include <math.h>
#define SIZE 4
/*
 * MPI_Scatterv demo: rank 0 distributes the SIZE*SIZE character matrix
 * as evenly as possible among all ranks (the first `rem` ranks receive
 * one extra element), then every rank prints the chunk it received.
 */
int main(int argc, char *argv[])
{
    int rank, size;   /* this process' rank, and the number of processes */
    int *sendcounts;  /* how many elements to send to each process */
    int *displs;      /* displacement where each process' segment begins */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int rem = (SIZE * SIZE) % size; /* elements left over after even division */
    int sum = 0;                    /* running sum of counts, used for displacements */
    char rec_buf[100];              /* receive buffer, large enough for any chunk */

    /* the data to be distributed */
    char data[SIZE][SIZE] = {
        {'a', 'b', 'c', 'd'},
        {'e', 'f', 'g', 'h'},
        {'i', 'j', 'k', 'l'},
        {'m', 'n', 'o', 'p'}
    };

    sendcounts = malloc(sizeof *sendcounts * size);
    displs = malloc(sizeof *displs * size);
    if (sendcounts == NULL || displs == NULL) { /* BUG FIX: was unchecked */
        fprintf(stderr, "Memory allocation unsuccessful.\n");
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    /* calculate send counts and displacements */
    for (int i = 0; i < size; i++) {
        sendcounts[i] = (SIZE * SIZE) / size;
        if (rem > 0) { /* spread the remainder over the first `rem` ranks */
            sendcounts[i]++;
            rem--;
        }
        displs[i] = sum;
        sum += sendcounts[i];
    }

    /* print calculated send counts and displacements for each process */
    if (0 == rank) {
        for (int i = 0; i < size; i++) {
            printf("sendcounts[%d] = %d\tdispls[%d] = %d\n", i, sendcounts[i], i, displs[i]);
        }
    }

    /* divide the data among processes as described by sendcounts and displs */
    MPI_Scatterv(&data[0][0], sendcounts, displs, MPI_CHAR, rec_buf, 100, MPI_CHAR, 0, MPI_COMM_WORLD);

    /* print what each process received */
    printf("%d: ", rank);
    for (int i = 0; i < sendcounts[rank]; i++) {
        printf("%c\t", rec_buf[i]);
    }
    printf("\n");

    free(sendcounts);
    free(displs);
    MPI_Finalize();
    return 0;
}

@ -4,12 +4,12 @@
#include <mpi.h>
// Avoids aggresive memory reallocation at the beginning of buffer growth sequence
#define INIT_COUNT 8
#define INIT_BUFFER_SIZE 8
// Optimal exponential buffer growth factor, 2 is sometimes used to
#define GROWTH_FACTOR 1.5
// Macro for counting the lenght of an array
#define COUNT(x) ((int) (sizeof(x) / sizeof(x[0])))
//#define COUNT(x) ((int) (sizeof(x) / sizeof(x[0])))
void *my_malloc(size_t size);

4
run

@ -1,4 +1,6 @@
#!/bin/bash
mpicc -Wall "$1.c" "library.c" -o "$1"
#mpicc -Wall -Wno-unused-variable "$1.c" "library.c" -o "$1"
#mpicc -Wall "$1.c" "library.c" -o "$1"
mpicc -Wall "$1.c" -o "$1"
mpirun -v -np "$2" "$1"

Binary file not shown.

@ -1,147 +1,176 @@
#include "library.h"
#define INPUTFILE "zadanie1.in"
#define MIN_PORTION 2
int maximum(int n, int *numbers);
int *array_slice(int n, int *numbers, int start, int count);
int maximum(int n, int *arrNumbers);
int *array_slice(int n, int *arrNumbers, int start, int count);
int main(int argc, char *argv[]) {
int processes, rank;
MPI_Status status;
int numProcesses, numRank;
MPI_Status mpiStatus;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &processes);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
int i, n, value, count = 0;
// We must subtract one process for a root and one for a remainder
//int slaves = processes - 2;
if (processes < 3) {
printf("There must be at least 3 processes to do parallel computing.\n");
exit(EXIT_FAILURE);
}
MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
MPI_Comm_rank(MPI_COMM_WORLD, &numRank);
// This is the master process
if (rank == 0) {
if (numRank == 0) {
int number, max = INIT_COUNT;
// Allocate enough memory for INIT_COUNT numbers
int *numbers = my_malloc(max * sizeof(int));
int numNumber, numCount = 0;
// To avoid aggresive buffer growth at the beggining of the sequence
int numBufferSize = INIT_BUFFER_SIZE;
// Allocate enough memory for initial buffer
int *arrNumbers = my_malloc(numBufferSize * sizeof(int));
//int num[10];
FILE *file = my_fopen(INPUTFILE, "r");
FILE *ptrFile = my_fopen(INPUTFILE, "r");
// Read lines of numbers until an EOF or a character is read
while (fscanf(file, "%d", &number) == 1) {
// Buffer space check
if (max == count) {
while (fscanf(ptrFile, "%d", &numNumber) == 1) {
// Incorporate space for storing results received from slaves at the end of array
int numBufferUsed = numCount + numProcesses - 1;
if (numBufferSize == numBufferUsed) {
// Grow buffer exponentially
max *= GROWTH_FACTOR;
numbers = my_realloc(numbers, max * sizeof(int));
numBufferSize *= GROWTH_FACTOR;
arrNumbers = my_realloc(arrNumbers, numBufferSize * sizeof(int));
}
// Store the read number to the array in memory, if in usable range
numbers[count++] = number;
arrNumbers[numCount++] = numNumber;
}
//int portion = count / slaves;
int slaves = processes - 1;
int portion = count / slaves;
int remainder = count % slaves;
// If there is a remainder, we need to dedicate a slave to it
if (remainder < 2)
slaves--;
// If there are no slave processes, just find the maximum of the numbers from the file
int numUse = numCount, numStart = 0;
int numPortion = numCount;
portion = count / slaves;
remainder = count % slaves;
if (numProcesses > 1) {
//switch (remainder) {
//case 0:
//portion = count / slaves;
//break;
//case 1;
//if (portion < 2) {
//printf("please deacrease the number of processes.\n");
//exit(exit_failure);
//}
printf("count: %d, slaves: %d, portion: %d, mod: %d \n", count, slaves, portion, remainder);
//// Send every slave the portion of the set
//for (i = 0; i < slaves; i++) {
//int *slice = array_slice(count, numbers, i * portion, portion);
//MPI_Send(slice, portion, MPI_INT, i + 1, 1, MPI_COMM_WORLD);
//}
//i++;
//int *slice = array_slice(count, numbers, i * portion, remainder);
//MPI_Send(slice, remainder, MPI_INT, i + 1, 1, MPI_COMM_WORLD);
//int *maximums = my_malloc((i + 1) * sizeof(int));
//// Receive all the maximums from the slaves
//for (i = 0; i < slaves; i++) {
//MPI_Recv(&value, 1, MPI_INT, i + 1, 1, MPI_COMM_WORLD, &status);
//maximums[i] = value;
////printf("i = %d, received %d from %d\n", i, value, i+1);
//}
//if (remainder > 1) {
//int *slice = array_slice(count, numbers, i * portion, remainder);
//maximums[i] = maximum(remainder, slice);
//}
//else if (remainder == 1) {
//maximums[i] = numbers[count - 1];
//}
//printf("Maximum of numbers");
//for (n = 0; n < count; n++)
//printf(" %d", numbers[n]);
//printf("Maximum is %d\n", maximum(i + 1, maximums));
//printf("i: %d, count: %d, max: %d\n", i, COUNT(maximums), maximum(i + 1, &maximums));
//printf("numbers: ");
//for (n = 0; n <= i; n++)
//printf("%d ", maximums[n]);
//printf("\n");
}
// This is a slave process
else {
//MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
//MPI_Get_count(&status, MPI_INT, &n);
//if (n != MPI_UNDEFINED && n >= 2) {
//int *numbers = my_malloc(n * sizeof(int));
////printf("address before: %p\n", numbers);
//MPI_Recv(numbers, n, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
////printf("address after: %p\n", numbers);
//value = maximum(n, numbers);
//MPI_Send(&value, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
//}
}
//int numSlaves = numProcesses - 1;
numPortion = numCount / numProcesses;
// Send at least 2 numbers to compare to slave process, no less
numPortion = numPortion >= 2 ? numPortion : 2;
// Do the loop for every slave process
for (int numSlave = 1; numProcesses > numSlave; numSlave++) {
printf("> sending %d to %d\n", numPortion, numSlave);
MPI_Send(&arrNumbers[numStart], numPortion, MPI_INT, numSlave, 1, MPI_COMM_WORLD);
numStart += numPortion;
numUse -= numPortion;
}
for (int
MPI_Finalize();
return (0);
}
//int max = maximum(numUse, &arrNumbers[numStart]);
//int max = maximum(numCount, arrNumbers);
//printf("processes: %d, buffer: %d, count: %d, portion: %d \n",
//numProcesses, numBufferSize, numCount, numPortion);
//printf("start: %d, use: %d, max: %d\n", numStart, numUse, max);
}
// This is a slave process
else {
int numCount, numValue;
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &mpiStatus);
MPI_Get_count(&mpiStatus, MPI_INT, &numCount);
if (numCount != MPI_UNDEFINED) {
int *arrNumbers = my_malloc(numCount * sizeof(int));
//printf("address before: %p\n", arrNumbers);
MPI_Recv(arrNumbers, numCount, MPI_INT, 0, 1, MPI_COMM_WORLD, &mpiStatus);
//printf("address after: %p\n", arrNumbers);
numValue = maximum(numCount, arrNumbers);
MPI_Send(&numValue, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
}
}
MPI_Finalize();
return (0);
}
int maximum(int n, int *numbers) {
int maximum(int n, int *arrNumbers) {
int value = 0;
for (int i = 0; i < n; i++) {
if (value < numbers[i]) {
value = numbers[i];
if (value < arrNumbers[i]) {
value = arrNumbers[i];
}
}
return value;
}
int *array_slice(int n, int *numbers, int start, int count) {
int *array_slice(int n, int *arrNumbers, int start, int count) {
int *slice = my_malloc(count * sizeof(int));
for (int i = 0; i < count && i < n; i++)
slice[i] = numbers[start + i];
slice[i] = arrNumbers[start + i];
return slice;
}
/*int portion = count / slaves;
int slaves = numProcesses - 1;
// If there is a remainder, we need to dedicate a slave to it
if (numRemainder < 2)
slaves--;
portion = count / slaves;
remainder = count % slaves;
switch (remainder) {
case 0:
portion = count / slaves;
break;
case 1;
if (numPortion < 2) {
printf("please deacrease the number of processes.\n");
exit(exit_failure);
}
printf("count: %d, slaves: %d, portion: %d, mod: %d \n", count, slaves, numPortion, numRemainder);
// Send every slave the portion of the set
for (i = 0; i < slaves; i++) {
int *slice = array_slice(count, arrNumbers, i * numPortion, numPortion);
MPI_Send(slice, numPortion, MPI_INT, i + 1, 1, MPI_COMM_WORLD);
}
i++;
int *slice = array_slice(count, arrNumbers, i * numPortion, numRemainder);
MPI_Send(slice, numRemainder, MPI_INT, i + 1, 1, MPI_COMM_WORLD);
int *maximums = my_malloc((i + 1) * sizeof(int));
// Receive all the maximums from the slaves
for (i = 0; i < slaves; i++) {
MPI_Recv(&value, 1, MPI_INT, i + 1, 1, MPI_COMM_WORLD, &mpiStatus);
maximums[i] = value;
//printf("i = %d, received %d from %d\n", i, value, i+1);
}
if (remainder > 1) {
int *slice = array_slice(count, arrNumbers, i * numPortion, remainder);
maximums[i] = maximum(remainder, slice);
}
else if (remainder == 1) {
maximums[i] = arrNumbers[count - 1];
}
printf("Maximum of arrNumbers");
for (n = 0; n < count; n++)
printf(" %d", arrNumbers[n]);
printf("Maximum is %d\n", maximum(i + 1, maximums));
printf("i: %d, count: %d, max: %d\n", i, COUNT(maximums), maximum(i + 1, &maximums));
printf("arrNumbers: ");
for (n = 0; n <= i; n++)
printf("%d ", maximums[n]);
printf("\n");*/

@ -8,18 +8,12 @@
8
9
10
11
12
13
14
15
16
17
0
-1
21
-234
32
32
32
31
9
8
7
6
5
4
3
2
1

Binary file not shown.

@ -0,0 +1,188 @@
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <mpi.h>
// Avoids aggresive memory reallocation at the beginning of buffer growth sequence
#define INIT_BUFFER_SIZE 8
// Optimal exponential buffer growth factor, 2 is sometimes used to
#define GROWTH_FACTOR 1.5
#define INPUTFILE "zadanie1.in"
//#define MIN_PORTION 2
int maximum(int n, int *arrNumbers);
int *array_slice(int n, int *arrNumbers, int start, int count);
void *my_malloc(size_t size);
void *my_realloc(void *p, size_t size);
FILE *my_fopen(const char *filename, const char *mode);
/*
 * Reads integers from INPUTFILE, computes per-rank block sizes and
 * displacements, scatters the numbers with MPI_Scatterv, prints what
 * each rank received, and gathers the blocks back with MPI_Gatherv.
 * NOTE(review): every rank reads the input file; presumably only the
 * root's copy matters for the scatter -- confirm intended design.
 */
int main(int argc, char *argv[]) {
    int numProcesses, numRank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &numRank);

    int numNumber, numCount = 0;
    /* Start small to avoid aggressive reallocation at the beginning */
    int numBufferSize = INIT_BUFFER_SIZE;
    int *arrNumbers = my_malloc(numBufferSize * sizeof(int));

    FILE *ptrFile = my_fopen(INPUTFILE, "r");
    /* Read numbers until an EOF or a non-numeric character is hit */
    while (fscanf(ptrFile, "%d", &numNumber) == 1) {
        /* Keep spare room for per-slave results appended at the array's end */
        int numBufferUsed = numCount + numProcesses - 1;
        if (numBufferSize == numBufferUsed) {
            /* Grow buffer exponentially */
            numBufferSize *= GROWTH_FACTOR;
            arrNumbers = my_realloc(arrNumbers, numBufferSize * sizeof(int));
        }
        arrNumbers[numCount++] = numNumber;
    }
    fclose(ptrFile); /* BUG FIX: the file handle was never closed */

    int numBlockIndex = 0;
    int numRemainder = numCount % numProcesses;
    int *arrBlockIndices = my_malloc(numProcesses * sizeof(int));
    int *arrBlockSizes = my_malloc(numProcesses * sizeof(int));
    /* BUG FIX: `ii` was declared without an initializer -- reading it
     * was undefined behavior and the loop could do anything */
    for (int ii = 0; ii < numProcesses; ii++) {
        arrBlockSizes[ii] = numCount / numProcesses;
        if (numRemainder > 0) { /* first ranks absorb the remainder */
            arrBlockSizes[ii]++;
            numRemainder--;
        }
        arrBlockIndices[ii] = numBlockIndex;
        numBlockIndex += arrBlockSizes[ii];
    }

    if (numRank == 0) {
        for (int jj = 0; jj < numProcesses; jj++) {
            printf("size[%d] = %d\tindex[%d] = %d\n", jj, arrBlockSizes[jj], jj, arrBlockIndices[jj]);
        }
    }

    int arrMaximums[100]; /* receive buffer; assumes a block fits in 100 ints */
    MPI_Scatterv(arrNumbers, arrBlockSizes, arrBlockIndices, MPI_INT, arrMaximums, 100, MPI_INT, 0, MPI_COMM_WORLD);

    /* print what each process received */
    printf("%d: ", numRank);
    for (int i = 0; i < arrBlockSizes[numRank]; i++) {
        printf("m[%d] = %d\t", i, arrMaximums[i]);
    }
    printf("\n");

    /* BUG FIX: send exactly this rank's block back, not the whole
     * 100-element buffer -- the send count must match recvcounts[rank] */
    MPI_Gatherv(arrMaximums, arrBlockSizes[numRank], MPI_INT, arrNumbers, arrBlockSizes, arrBlockIndices, MPI_INT, 0, MPI_COMM_WORLD);

    /* BUG FIX: release the heap buffers that were previously leaked */
    free(arrBlockIndices);
    free(arrBlockSizes);
    free(arrNumbers);
    MPI_Finalize();
    return (0);
}
/*
 * Returns the largest of the first n elements of arrNumbers,
 * or 0 when n <= 0 (matching the old empty-array result).
 * BUG FIX: the running maximum started at 0, so arrays containing only
 * negative numbers (the input file has -1 and -234) wrongly returned 0.
 * Seed with the first element instead.
 */
int maximum(int n, int *arrNumbers) {
    if (n <= 0) {
        return 0;
    }
    int value = arrNumbers[0];
    for (int i = 1; i < n; i++) {
        if (value < arrNumbers[i]) {
            value = arrNumbers[i];
        }
    }
    return value;
}
/*
 * Returns a newly allocated copy of `count` elements of arrNumbers
 * beginning at index `start`. The caller owns and must free the slice.
 * BUG FIX: the old loop bound checked `i < n` instead of `start + i < n`,
 * which read past the end of the source array whenever start > 0, and it
 * left any unfilled tail of the slice uninitialized; out-of-range
 * positions are now zeroed instead.
 */
int *array_slice(int n, int *arrNumbers, int start, int count) {
    int *slice = my_malloc(count * sizeof(int));
    for (int i = 0; i < count; i++)
        slice[i] = (start + i < n) ? arrNumbers[start + i] : 0;
    return slice;
}
/* malloc wrapper: terminates the whole program on allocation failure,
 * so callers never have to check for NULL themselves. */
void *my_malloc(size_t size) {
    void *ptr = malloc(size);
    if (!ptr) {
        printf("Memory allocation unsuccessful.\n");
        exit(EXIT_FAILURE);
    }
    return ptr;
}
/* realloc wrapper: terminates the whole program when the buffer cannot
 * grow, so the caller may assign the result back unconditionally. */
void *my_realloc(void *p, size_t size) {
    void *grown = realloc(p, size);
    if (!grown) {
        printf("Insufficient memory; can't add more items.\n");
        exit(EXIT_FAILURE);
    }
    return grown;
}
/* fopen wrapper: terminates the whole program when the file cannot be
 * opened, reporting the offending filename first. */
FILE *my_fopen(const char *filename, const char *mode) {
    FILE *handle = fopen(filename, mode);
    if (!handle) {
        printf("File %s could not be opened.\n", filename);
        exit(EXIT_FAILURE);
    }
    return handle;
}
// If there are no slave processes, just find the maximum of the numbers from the file
//int numUse = numCount, numStart = 0;
//int numPortion = numCount;
//if (numProcesses > 1) {
////int numSlaves = numProcesses - 1;
//numPortion = numCount / numProcesses;
//// Send at least 2 numbers to compare to slave process, no less
//numPortion = numPortion >= 2 ? numPortion : 2;
//// Do the loop for every slave process
//for (int numSlave = 1; numProcesses > numSlave; numSlave++) {
//printf("> sending %d to %d\n", numPortion, numSlave);
//MPI_Send(&arrNumbers[numStart], numPortion, MPI_INT, numSlave, 1, MPI_COMM_WORLD);
//numStart += numPortion;
//numUse -= numPortion;
//}
//}
//int max = maximum(numUse, &arrNumbers[numStart]);
//int max = maximum(numCount, arrNumbers);
//printf("processes: %d, buffer: %d, count: %d, portion: %d \n",
//numProcesses, numBufferSize, numCount, numPortion);
//printf("start: %d, use: %d, max: %d\n", numStart, numUse, max);
Loading…
Cancel
Save