#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#include "func.h"
#define WORKTAG 1
#define REQTAG 2
/*
* Find the area to the tolerance.
*/
static double find(double x1, double y1, double x2, double y2, double tol)
{
    /* Compute the midpoint y value from the function. */
    double midx = (x1 + x2) / 2;
    double midy = f(midx);
    /* Estimate the midpoint by averaging the endpoint y values. */
    double midest = (y1 + y2) / 2;
    /* See if we're getting close. */
    if(fabs(midy - midest) <= tol)
        /* Will do.  Compute the area using midy, since we found it.
           Two trapezoids, (midx-x1)*(y1+midy)/2 + (x2-midx)*(midy+y2)/2,
           algebraically simplified using midx-x1 = x2-midx = (x2-x1)/2. */
        return 0.25*(x2-x1)*(y1 + y2 + 2*midy);
    else
        /* Subdivide and try again. */
        return find(x1, y1, midx, midy, tol) +
               find(midx, midy, x2, y2, tol);
}
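/* Example: for f(x) = x*x on [0,2], the first call to find() gets
   midx = 1, midy = 1, and midest = (0 + 4)/2 = 2.  Since |1 - 2| = 1
   exceeds any small tolerance, the interval is split at x = 1 and each
   half is handled by a recursive call; halves that already meet the
   tolerance return the simplified two-trapezoid area directly. */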
int main(int argc, char **argv)
{
    int nproc, myid;
    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&nproc);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    /* Fine argument whine. */
    if(argc < 5) {
        printf("Usage: %s startx endx tol nreg funcargs*\n",
               argv[0]);
        MPI_Finalize();
        exit(2);
    }
#ifdef DEBUG
    /* See what we're running on. */
    MPI_Get_processor_name(processor_name,&namelen);
    printf("Process %d of %d is on %s\n",
           myid, nproc, processor_name);
    fflush(stdout);
#endif
    /* Get the args. */
    char *pname = argv[0];
    double start = atof(argv[1]);
    double end = atof(argv[2]);
    double tol = atof(argv[3]);
    int nreg = atoi(argv[4]);
    argc -= 5; argv += 5;
    /* Sanity. */
    if(tol < 0.0000000001) {
        printf("Tolerance is too small or negative.\n");
        MPI_Finalize();
        exit(3);
    }
    if(end < start) {
        double t = start;
        start = end;
        end = t;
    }
    /* Local total for the collect function. */
    double loctot = 0.0;
    /* The first task passes out the work. */
    if(myid == 0) {
        /* *** Foreman Function: Distribute work. *** */
        /* Divide into nreg regions and send 'em out. */
        double x = start;
        double inc = (end - start) / nreg;
        int i;
        for(i = nreg; i--;) {
            double newx = i ? x + inc : end;
            int worker;
            MPI_Status st;
            /* Get a work request and send the segment. */
            MPI_Recv(&worker, 1, MPI_INT, MPI_ANY_SOURCE,
                REQTAG, MPI_COMM_WORLD, &st);
            MPI_Send(&x, 1, MPI_DOUBLE, worker, WORKTAG,
                MPI_COMM_WORLD);
            MPI_Send(&newx, 1, MPI_DOUBLE, worker, WORKTAG,
                MPI_COMM_WORLD);
            x = newx;
        }
        /* Tell 'em all to quit.  We send an overly large x value as a
           flag, kept in a separate variable so the real end survives for
           the final report.  This loop runs one less time than the number
           of processes, so we will tell each worker to quit. */
        double quit = (end < 1.0 ? 1.0 : end) * 2 + 1.0;
        while(--nproc) {
            int worker;
            MPI_Status st;
            MPI_Recv(&worker, 1, MPI_INT, MPI_ANY_SOURCE,
                REQTAG, MPI_COMM_WORLD, &st);
            MPI_Send(&quit, 1, MPI_DOUBLE, worker, WORKTAG,
                MPI_COMM_WORLD);
        }
    } else {
        /* *** Worker Function: Compute regions *** */
        /* Initialize the function. */
        finit(argc, argv);
        /* Execute regions until all have been taken care of. */
        while(1) {
            /* Get some work.  Send a request to the boss, and
               do what he says. */
            double startx, endx;
            MPI_Status st;
            MPI_Send(&myid, 1, MPI_INT, 0, REQTAG,
                MPI_COMM_WORLD);
            MPI_Recv(&startx, 1, MPI_DOUBLE, 0, WORKTAG,
                MPI_COMM_WORLD, &st);
            /* A start beyond the end of the interval means quit. */
            if(startx > end) break;
            MPI_Recv(&endx, 1, MPI_DOUBLE, 0, WORKTAG,
                MPI_COMM_WORLD, &st);
            loctot += find(startx, f(startx), endx, f(endx), tol);
        }
    }
    /* Collect the result. */
    double tot;
    MPI_Reduce(&loctot, &tot, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if(myid == 0)
        printf("Integral from %g to %g = %g (tol %g)\n",
               start, end, tot, tol);
    MPI_Finalize();
    return 0;
}
In this version, the number of blocks is independent of the number of tasks,
so the program takes the number of blocks on the command line.
    mpiexec -hosts 2 bennet.mc.edu sandbox.mc.edu parallel -120 130 0.000001 20 5 3 7 1
or
    mpiexec -hosts 2 bennet.mc.edu 5 sandbox.mc.edu 7 parallel -120 130 0.000001 30 5 3 7 1
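The listing relies on a separate func.h (included at the top) that supplies
f(), the function to integrate, and finit(), which consumes the leftover
command-line arguments (the funcargs, 5 3 7 1 in the examples above).  That
file is not shown here; the sketch below is a hypothetical minimal version,
assuming the funcargs are polynomial coefficients with the constant term
first, just so the listing can be built and tried.

/* func.h -- interface the integrator expects (hypothetical reconstruction). */
#ifndef FUNC_H
#define FUNC_H
void finit(int argc, char **argv);   /* Set up from the leftover command-line args. */
double f(double x);                  /* The function to integrate. */
#endif

/* func.c -- one possible implementation: treat the funcargs as polynomial
   coefficients, constant term first.  This is an assumption, not the
   original func.c. */
#include <stdlib.h>
#include "func.h"

#define MAXCOEF 32
static double coef[MAXCOEF];
static int ncoef = 0;

void finit(int argc, char **argv)
{
    int i;
    ncoef = argc < MAXCOEF ? argc : MAXCOEF;
    for(i = 0; i < ncoef; i++)
        coef[i] = atof(argv[i]);
    if(ncoef == 0) {
        /* No coefficients given: default to f(x) = x. */
        coef[0] = 0.0; coef[1] = 1.0; ncoef = 2;
    }
}

double f(double x)
{
    /* Evaluate the polynomial by Horner's rule. */
    double y = 0.0;
    int i;
    for(i = ncoef - 1; i >= 0; i--)
        y = y*x + coef[i];
    return y;
}

With a func.c along those lines, a build such as

    mpicc -o parallel parallel.c func.c -lm

(the source file names are assumptions) produces the parallel binary used in
the mpiexec lines above.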