C--   Scratch descriptors for derived MPI datatypes (count/length/stride
C     of elements); values are assigned elsewhere in this routine.
      INTEGER elCount
      INTEGER elLen
      INTEGER elStride

C--   Variables needed for mpi gather scatter routines.
C     mpi_myXGlobalLo/mpi_myYGlobalLo hold, for every process, the global
C     lower-left tile coordinates, indexed by MPI rank + 1 (1..nPx*nPy).
C     Shared via COMMON so the gather/scatter routines can see them.
      COMMON /GlobalLo/ mpi_myXGlobalLo, mpi_myYGlobalLo
      INTEGER mpi_myXGlobalLo(nPx*nPy)
      INTEGER mpi_myYGlobalLo(nPx*nPy)
C     npe     :: loop index over MPI ranks (0..numberOfProcs-1)
C     itemp   :: scratch receive buffer for a single INTEGER
C     ierr    :: MPI error return code
C     istatus :: MPI receive status
      INTEGER npe,itemp,ierr,istatus(MPI_STATUS_SIZE)

#endif /* ALLOW_USE_MPI */
C     myThid :: my Thread Id number
      INTEGER myThid
CEOP
C--   Global lower-left Y coordinate of this process'' tile block:
C     mpiPy is the zero-based process row, sNy*nSy points per process row.
      mpiYGlobalLo = 1 + sNy*nSy*(mpiPy)
      myXGlobalLo = mpiXGlobalLo
      myYGlobalLo = mpiYGlobalLo

C--   To speed-up mpi gather and scatter routines, myXGlobalLo
C     and myYGlobalLo from each process are transferred to
C     a common block array. This allows process 0 to know
C     the location of the domains controlled by each process.
C     NOTE(review): every rank both sends its value to ALL ranks and
C     receives from ALL ranks (O(N^2) messages), and the blocking
C     MPI_SEND to self relies on MPI-internal buffering of a single
C     INTEGER to avoid deadlock -- presumably safe for this message
C     size, but an MPI_ALLGATHER would express this directly; confirm
C     before changing.
      DO npe = 0, numberOfProcs-1
         CALL MPI_SEND (myXGlobalLo, 1, MPI_INTEGER,
     &        npe, mpiMyId, MPI_COMM_MODEL, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
C        Tag npe matches the sender''s mpiMyId, selecting rank npe''s value.
         CALL MPI_RECV (itemp, 1, MPI_INTEGER,
     &        npe, npe, MPI_COMM_MODEL, istatus, ierr)
         mpi_myXGlobalLo(npe+1) = itemp
      ENDDO
      DO npe = 0, numberOfProcs-1
         CALL MPI_SEND (myYGlobalLo, 1, MPI_INTEGER,
     &        npe, mpiMyId, MPI_COMM_MODEL, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
         CALL MPI_RECV (itemp, 1, MPI_INTEGER,
     &        npe, npe, MPI_COMM_MODEL, istatus, ierr)
         mpi_myYGlobalLo(npe+1) = itemp
      ENDDO

C--   One-based process grid coordinates of this process.
      myPx = mpiPx+1
      myPy = mpiPy+1
C--   Get MPI id for neighboring procs.