""" From "COMPUTATIONAL PHYSICS", 3rd Ed, Enlarged Python eTextBook by RH Landau, MJ Paez, and CC Bordeianu Copyright Wiley-VCH Verlag GmbH & Co. KGaA, Berlin; Copyright R Landau, Oregon State Unv, MJ Paez, Univ Antioquia, C Bordeianu, Univ Bucharest, 2015. Support by National Science Foundation""" /****************************************************************** * 1-D Gauss Siedel solution to Poisson equation * * * * Syntax * * Poisson N N_x * * ( Max # iteration, grid in x ) * * * * Compilation * * mpicc poisson_parallel_1d.c NR.c -lm -o poisson_parallel_1d * * * * Run * * mpiexec -np 4 ./poisson_parallel_1d 1000 100 * * * * Parallel Version * *******************************************************************/ /* Michel Vallieres */ #include #include #include #include #include #define EPS 1.0e-7 /* tolerable error */ #define MAX_PROCS 100 /* max # of processors */ #define MAX_GRID_POINTS 1000 /* max grid points */ double phi[MAX_GRID_POINTS], source[MAX_GRID_POINTS]; int ibound[MAX_GRID_POINTS]; /* Gauss-Siedel in local piece of lattice */ void Gauss_Siedel_step( int imin, int imax, int iter, double *diff, double h ) { double old, chi; int i; /* over-relaxation */ chi = 1.0; if ( iter > 30 ) chi = 1.2; if ( iter > 50 ) chi = 1.8; *diff = 0.0; /* scan local lattice */ for ( i=imin; i<=imax ; i++ ) { if ( ibound[i] == 0 ) { old = phi[i]; phi[i] = 0.5 * ( phi[i-1] + phi[i+1] - h*h*source[i] ); phi[i] = (1.0-chi) * old + chi*phi[i]; if ( fabs( old - phi[i] ) > *diff ) *diff = fabs( old - phi[i] ); } } } /* to exchange the ghost rows */ void exchange ( int myid, int numprocs, int imin, int imax ) { MPI_Status recv_status; int top_process, bot_process; top_process = myid + 1; bot_process = myid - 1; /* odd - even scheme for safety */ /* & efficiency */ if ( myid % 2 != 0 ) /* odd process */ { if ( myid < numprocs-1 ) MPI_Ssend( &phi[imax], 1, MPI_DOUBLE, top_process, 121, MPI_COMM_WORLD); if ( myid > 0 ) MPI_Recv( &phi[imin-1], 1, MPI_DOUBLE, bot_process, 122, MPI_COMM_WORLD, &recv_status); if ( myid > 0 ) MPI_Ssend( &phi[imin], 1, MPI_DOUBLE, bot_process, 123, MPI_COMM_WORLD); if ( myid < numprocs-1 ) MPI_Recv( &phi[imax+1], 1, MPI_DOUBLE, top_process, 124, MPI_COMM_WORLD, &recv_status); } else /* even process */ { if ( myid > 0 ) MPI_Recv( &phi[imin-1], 1, MPI_DOUBLE, bot_process, 121, MPI_COMM_WORLD, &recv_status); if ( myid < numprocs-1 ) MPI_Ssend( &phi[imax], 1, MPI_DOUBLE, top_process, 122, MPI_COMM_WORLD); if ( myid < numprocs-1 ) MPI_Recv( &phi[imax+1], 1, MPI_DOUBLE, top_process, 123, MPI_COMM_WORLD, &recv_status); if ( myid > 0 ) MPI_Ssend( &phi[imin], 1, MPI_DOUBLE, bot_process, 124, MPI_COMM_WORLD); } } /* set up local domain - slices */ void decompose_domain ( int myid, int numprocs, int Nx, int *begin_slice, int *end_slice, int *size_slice, int *imin, int *imax ) { int ip, j, average, leftover; int temp, slice, N_slices; /* check dimensions */ if ( numprocs > MAX_PROCS ) { if ( myid == 0 ) fprintf( stderr, "Code written for fewer processes than %d \n", MAX_PROCS); MPI_Finalize(); exit(1); } /* partition domain */ N_slices = numprocs; average = Nx / N_slices; leftover = Nx % N_slices; temp = -1; for ( slice=0 ; slice0 ) { end_slice[slice]++; leftover--; } temp = end_slice[slice]; size_slice[slice] = end_slice[slice] - begin_slice[slice] + 1; } /* echo partitions */ if( myid == 0 ) { fprintf( stderr, "\n Local grid: \n "); for ( ip=0 ; ip diff ) diff = fabs( difference ); } } /* find max amongst processes */ MPI_Allreduce ( &diff, &totaldiff, 1, MPI_DOUBLE, MPI_MAX, 
MPI_COMM_WORLD ); if ( myid == 0 ) fprintf( stderr," Largest error: %e \n\n", totaldiff ); /* gather pieces of phi */ if ( myid == 0 ) { /* space to gather final phi */ final_phi = (double *)malloc( Nx*sizeof(double) ); /* transfer own sub-domain */ /* piece of phi */ for ( i=begin_slice[myid]; i<=end_slice[myid]; i++ ) { final_phi[i] = phi[i]; } /* receives pieces of */ /* phi from sub-domains */ for ( ip=1 ; ip= imin || is <= imax ) { ibound[is] = 1; phi[is] = +1.0; } /* iterate the solution */ for ( iter=0; iter
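
The odd/even ordering of MPI_Ssend and MPI_Recv calls inside exchange() exists only to keep the synchronous sends from deadlocking: odd processes send while their even neighbors receive, and the roles then reverse. The same ghost-cell exchange can also be expressed with MPI_Sendrecv, letting the library pair each send with its matching receive; the sketch below is not part of the original program, assumes the same global phi[] array and the imin/imax conventions used above, and uses arbitrary tag values 201/202.

/* Sketch of an alternative ghost exchange using MPI_Sendrecv;
   not in the original code, assumes the same global phi[] and
   the imin/imax conventions above; tags 201/202 are arbitrary. */
void exchange_sendrecv( int myid, int numprocs, int imin, int imax )
{
  MPI_Status status;
  int        top_process, bot_process;

  /* MPI_PROC_NULL turns the edge communications into no-ops */
  top_process = ( myid < numprocs-1 ) ? myid + 1 : MPI_PROC_NULL;
  bot_process = ( myid > 0 )          ? myid - 1 : MPI_PROC_NULL;

  /* pass the top edge value up, receive the bottom ghost value */
  MPI_Sendrecv( &phi[imax],   1, MPI_DOUBLE, top_process, 201,
                &phi[imin-1], 1, MPI_DOUBLE, bot_process, 201,
                MPI_COMM_WORLD, &status );

  /* pass the bottom edge value down, receive the top ghost value */
  MPI_Sendrecv( &phi[imin],   1, MPI_DOUBLE, bot_process, 202,
                &phi[imax+1], 1, MPI_DOUBLE, top_process, 202,
                MPI_COMM_WORLD, &status );
}

Calling such a routine in place of exchange() inside the iteration loop would perform the same communication without the explicit odd/even bookkeeping.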