#include "CPP_OPTIONS.h"

C     GATHER_2D: collect a tiled 2-D model field onto MPI process 0.
C
C     Each process owns nSx*nSy tiles of interior size sNx*sNy (plus
C     OLx/OLy overlap rows that are carried in the buffers but never
C     copied into the output).  On return, "global" on process 0 holds
C     the assembled Nx*Ny field; on all other processes "global" is
C     untouched.  Without MPI (ALLOW_USE_MPI undefined) the single
C     process simply copies its own tiles into "global".
      SUBROUTINE GATHER_2D( global, local, myThid )
C     Gather elements of a 2-D array from all mpi processes to process 0.
      IMPLICIT NONE
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
C     mythid - thread number for this instance of the routine.
C     global,local - working arrays used to transfer 2-D fields
C              global is the assembled full-domain field (output,
C              meaningful on process 0 only); local is this process's
C              tiled field including overlap regions (input).
      INTEGER mythid
      Real*8 global(Nx,Ny)
      _RL local(1-OLx:sNx+OLx,1-OLy:sNy+OLy,nSx,nSy)

      INTEGER iG,jG, i, j, bi, bj
#ifdef ALLOW_USE_MPI
C     Per-process global tile origins, indexed by (MPI rank + 1).
C     NOTE(review): this COMMON block must be filled elsewhere (at MPI
C     setup time) before the first call here - confirm the initializer.
      COMMON /GlobalLo/ mpi_myXGlobalLo, mpi_myYGlobalLo
      INTEGER mpi_myXGlobalLo(nPx*nPy)
      INTEGER mpi_myYGlobalLo(nPx*nPy)

C     temp    - receive buffer on process 0, same shape (with overlaps)
C               as each sender's "local" array
C     lbuff   - number of elements per transfer (includes overlaps)
C     idest   - rank of the gathering process (0)
C     ready_to_receive - dummy payload for the poll/handshake message
      _RL temp(1-OLx:sNx+OLx,1-OLy:sNy+OLy,nSx,nSy)
      INTEGER istatus(MPI_STATUS_SIZE), ierr
      INTEGER lbuff, idest, itag, npe, ready_to_receive
#endif /* ALLOW_USE_MPI */

C--   Make everyone wait except for master thread.
      _BARRIER
      _BEGIN_MASTER( myThid )

#ifndef ALLOW_USE_MPI

C--   Single-process case: copy tile interiors straight into "global",
C     mapping each tile-local (i,j) to global indices via this
C     process's origin (myXGlobalLo,myYGlobalLo).
      DO bj=1,nSy
       DO bi=1,nSx
        DO j=1,sNy
         DO i=1,sNx
          iG = myXGlobalLo-1+(bi-1)*sNx+i
          jG = myYGlobalLo-1+(bj-1)*sNy+j
          global(iG,jG) = local(i,j,bi,bj)
         ENDDO
        ENDDO
       ENDDO
      ENDDO

#else /* ALLOW_USE_MPI */

C     Whole tiles including overlap regions are shipped (simpler than
C     packing interiors), even though only interiors are used below.
      lbuff = (sNx+2*OLx)*nSx*(sNy+2*OLy)*nSy
      idest = 0
      itag  = 0
      ready_to_receive = 0

      IF( mpiMyId .EQ. 0 ) THEN

C--   Process 0 fills-in its local data
C     (npe = 0 here, so origins come from mpi_my[XY]GlobalLo(1).)
       npe = 0
       DO bj=1,nSy
        DO bi=1,nSx
         DO j=1,sNy
          DO i=1,sNx
           iG = mpi_myXGlobalLo(npe+1)-1+(bi-1)*sNx+i
           jG = mpi_myYGlobalLo(npe+1)-1+(bj-1)*sNy+j
           global(iG,jG) = local(i,j,bi,bj)
          ENDDO
         ENDDO
        ENDDO
       ENDDO

C--   Process 0 polls and receives data from each process in turn
C     The "ready_to_receive" send throttles the senders so only one
C     lbuff-sized message is in flight at a time.
C     NOTE(review): transfers use MPI_DOUBLE_PRECISION, which assumes
C     _RL is an 8-byte real - confirm against the _RL macro definition.
       DO npe = 1, numberOfProcs-1
        CALL MPI_SEND (ready_to_receive, 1, MPI_INTEGER,
     &       npe, itag, MPI_COMM_MODEL, ierr)
        CALL MPI_RECV (temp, lbuff, MPI_DOUBLE_PRECISION,
     &       npe, itag, MPI_COMM_MODEL, istatus, ierr)

C--   Process 0 gathers the local arrays into a global array.
C     Same interior-only copy as above, using sender npe's origins.
        DO bj=1,nSy
         DO bi=1,nSx
          DO j=1,sNy
           DO i=1,sNx
            iG = mpi_myXGlobalLo(npe+1)-1+(bi-1)*sNx+i
            jG = mpi_myYGlobalLo(npe+1)-1+(bj-1)*sNy+j
            global(iG,jG) = temp(i,j,bi,bj)
           ENDDO
          ENDDO
         ENDDO
        ENDDO
       ENDDO

      ELSE

C--   All processes except 0 wait to be polled then send local array
       CALL MPI_RECV (ready_to_receive, 1, MPI_INTEGER,
     &      idest, itag, MPI_COMM_MODEL, istatus, ierr)
       CALL MPI_SEND (local, lbuff, MPI_DOUBLE_PRECISION,
     &      idest, itag, MPI_COMM_MODEL, ierr)

      ENDIF

#endif /* ALLOW_USE_MPI */

      _END_MASTER( myThid )
      _BARRIER

      RETURN
      END