#include <mpi.h>
#include <stdio.h>
#include <math.h>

#define WORKTAG 1
#define DIETAG 2

#define MAXNUM 1000000000

static void master(void);
static void slave(int myrank);

/*
* main
* This program is really MIMD, but is written SPMD for
* simplicity in launching the application.
*/
int main(int argc, char *argv[]) {

	int myrank;
	char name[MPI_MAX_PROCESSOR_NAME];
	int len;

	MPI_Init(&argc, &argv);

	MPI_Comm_rank(MPI_COMM_WORLD, /* group of everybody */
		&myrank); /* 0 thru N-1 */

	MPI_Get_processor_name(name, &len);

	printf("\t\t\t\t\tHello. I am n%i, running on %s.\n",myrank,name);

	if (myrank == 0) {
		master();
	} else {
		slave(myrank);
	}
	MPI_Finalize();
	return(0);
}

/*
* master
* The master process sends work requests to the slaves
* and collects results.
*/

static void master(void)
{
	int ntasks, rank;
	long min, max, jump;
	long range[2];
	double sum;
	double ret;
	double time_start, time_stop;
	MPI_Status status;

	MPI_Comm_size(MPI_COMM_WORLD, &ntasks); /* #processes in app */

	if (ntasks < 2) {
		printf("Please run with 2 or more nodes...\n");
		return; 
	}

/*
* Seed the slaves.
*/

	jump = MAXNUM/(ntasks-1);	/* size of each slave's sub-range; the last slave absorbs the remainder */
	
	time_start = MPI_Wtime();
	
	for (rank = 1; rank < ntasks; rank++) {
		
		min=(rank-1)*jump;
		max=rank*jump-1;

		if (rank==ntasks-1)
			max=MAXNUM;
		
		if (rank==1)
			min=0;

		range[0]=min;
		range[1]=max;

		MPI_Send(range, /* message buffer */
			2, /* two data items */
			MPI_LONG, /* of this type */
			rank, /* to this rank */
			WORKTAG, /* a work message */
			MPI_COMM_WORLD); /* always use this */
		printf("Sent range [%ld-%ld] to node %i.\n", min, max, rank);
	}

/*
* Collect a result from each slave. The work was partitioned statically
* above, so there is nothing further to dispatch here.
*/
	sum=0;

	for (rank = 1; rank < ntasks; ++rank) {
		MPI_Recv(&ret, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
			MPI_ANY_TAG, MPI_COMM_WORLD, &status);
		sum += ret;
		printf("got one from node%i: %lf\n", status.MPI_SOURCE, ret);
	}

	time_stop=MPI_Wtime();

        printf("Total time: %f\n",time_stop-time_start);

	printf("\nSum is: %lf\n\n",sum);
/*
* Tell all the slaves to exit.
*/

	for (rank = 1; rank < ntasks; ++rank) {
		MPI_Send(NULL, 0, MPI_INT, rank, DIETAG,	/* empty message; only the tag matters */
			MPI_COMM_WORLD);
	}
}

/*
* slave
* Each slave process accepts work requests and returns
* results until a special termination request is received.
*/
static void slave(int myrank) {

	double result;
	long range[2];
	long min, max;
	long i;
	MPI_Status status;
	const char *indent = "";	/* used only by the commented-out progress printf below */

	for (;;) {
		MPI_Recv(range, 2, MPI_LONG, 0, MPI_ANY_TAG,
			MPI_COMM_WORLD, &status);
/*
* Check the tag of the received message.
*/
		if (status.MPI_TAG == DIETAG) {
			return;
		}

		min = range[0];
		max = range[1];

		if (myrank == 2)
			indent = "\t\t\t";

		result = 0;
		for (i = min; i <= max; i++) {
			result += sqrt((double)i);

/*			if (i % 1000000 == 0)
				printf("%snode %i: doing %ld...\n", indent, myrank, i);
*/
		}

		MPI_Send(&result, 1, MPI_DOUBLE, 0, 0,
			MPI_COMM_WORLD);
	}
}
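
/*
* Optional sanity check (a sketch added here, not called anywhere above):
* the MPI result can be compared against the Euler-Maclaurin asymptotic
*
*     sum_{i=0}^{n} sqrt(i)  ~=  (2/3)*n^(3/2) + (1/2)*sqrt(n)
*
* which for n = MAXNUM = 10^9 gives roughly 2.10818510836e13 and matches the
* "Sum is:" lines in the run log below to within floating-point rounding.
* The helper name approx_sum_sqrt is ours; master() could print it next to
* the MPI sum if such a check is wanted.
*/
double approx_sum_sqrt(long n)
{
	/* two leading terms of the Euler-Maclaurin expansion of sum sqrt(i) */
	return (2.0 / 3.0) * pow((double)n, 1.5) + 0.5 * sqrt((double)n);
}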

/*
export LAMRSH="ssh"
recon -v bhost      NOTE: YOUR COMPUTER MUST BE IN THE HOST FILE

recon: -- testing n0 (hydrogen.tjhsst.edu)
recon: -- testing n1 (p.tjhsst.edu)
recon: -- testing n2 (he.tjhsst.edu)
recon: -- testing n3 (li.tjhsst.edu)
recon: -- testing n4 (c.tjhsst.edu)
recon: -- testing n5 (n.tjhsst.edu)
recon: -- testing n6 (o.tjhsst.edu)
recon: -- testing n7 (na.tjhsst.edu)
recon: -- testing n8 (mg.tjhsst.edu)
recon: -- testing n9 (al.tjhsst.edu)
recon: -- testing n10 (si.tjhsst.edu)
recon: -- testing n11 (ne.tjhsst.edu)
recon: -- testing n12 (s.tjhsst.edu)
recon: -- testing n13 (be.tjhsst.edu)

lamboot -v bhost

LAM 6.5.6/MPI 2 C++/ROMIO - University of Notre Dame

Executing hboot on n0 (hydrogen.tjhsst.edu - 1 CPU)...
Executing hboot on n1 (p.tjhsst.edu - 1 CPU)...
Executing hboot on n2 (he.tjhsst.edu - 1 CPU)...
Executing hboot on n3 (li.tjhsst.edu - 1 CPU)...
Executing hboot on n4 (c.tjhsst.edu - 1 CPU)...
Executing hboot on n5 (n.tjhsst.edu - 1 CPU)...
Executing hboot on n6 (o.tjhsst.edu - 1 CPU)...
Executing hboot on n7 (na.tjhsst.edu - 1 CPU)...
Executing hboot on n8 (mg.tjhsst.edu - 1 CPU)...
Executing hboot on n9 (al.tjhsst.edu - 1 CPU)...
Executing hboot on n10 (si.tjhsst.edu - 1 CPU)...
Executing hboot on n11 (ne.tjhsst.edu - 1 CPU)...
Executing hboot on n12 (s.tjhsst.edu - 1 CPU)...
Executing hboot on n13 (be.tjhsst.edu - 1 CPU)...
topology done      
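
NOTE: build before running; this step was not captured in the session above,
and the source file name msum.c is assumed from the executable name:

	mpicc msum.c -o msum -lm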

mpirun -np 5 msum    (RUN 5 PROCESSES)
                              Hello. I am n1, running on phosphorus.
                              Hello. I am n2, running on helium.
                              Hello. I am n4, running on carbon.
                              Hello. I am n3, running on lithium.
                              Hello. I am n0, running on hydrogen.
Sent range [0-249999999] to node 1.
Sent range [250000000-499999999] to node 2.
Sent range [500000000-749999999] to node 3.
Sent range [750000000-1000000000] to node 4.
got one from node1: 2635231375568.780273
got one from node3: 6239504010117.341797
got one from node4: 7388787159664.755859
got one from node2: 4818328538251.493164
Total time: 19.842601

Sum is: 21081851083602.371094



mpirun N msum    RUN ON ALL PROCESSORS

                        Hello. I am n12, running on sulfur.
                        Hello. I am n4, running on carbon.
                        Hello. I am n2, running on helium.
                        Hello. I am n9, running on aluminum.
                        Hello. I am n6, running on oxygen.
                        Hello. I am n10, running on silicon.
                        Hello. I am n5, running on nitrogen.
                        Hello. I am n3, running on lithium.
                        Hello. I am n1, running on phosphorus.
                        Hello. I am n8, running on magnesium.
                        Hello. I am n7, running on sodium.
                        Hello. I am n11, running on neon.
                        Hello. I am n13, running on beryllium.
                        Hello. I am n0, running on hydrogen.
Sent range [0-76923075] to node 1.
Sent range [76923076-153846151] to node 2.
Sent range [153846152-230769227] to node 3.
Sent range [230769228-307692303] to node 4.
Sent range [307692304-384615379] to node 5.
Sent range [384615380-461538455] to node 6.
Sent range [461538456-538461531] to node 7.
Sent range [538461532-615384607] to node 8.
Sent range [615384608-692307683] to node 9.
Sent range [692307684-769230759] to node 10.
Sent range [769230760-846153835] to node 11.
Sent range [846153836-923076911] to node 12.
Sent range [923076912-1000000000] to node 13.

got one from node5: 1430432077039.374268
got one from node12: 2287703112770.268066
got one from node10: 2079201649027.937256
got one from node4: 1261095874883.805908
got one from node3: 1064939702561.615723
got one from node9: 1966671239240.670410
got one from node6: 1581672265876.548340
got one from node8: 1847290070072.672119
got one from node2: 822377764138.591797
got one from node13: 2385124658403.666016
got one from node11: 2185941627816.417236
got one from node7: 1719627711016.600342
got one from node1: 449773330753.131531
Total time: 7.304467

Sum is: 21081851083601.300781


lamwipe

lamhalt

*/