C $Header$
C $Name$

#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: INI_PROCS

C     !INTERFACE:
      SUBROUTINE INI_PROCS
      IMPLICIT NONE

C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE INI\_PROCS
C     | o Initialise multiple concurrent processes environment.
C     *==========================================================*
C     | Under MPI this routine calls various MPI service routines
C     | that map the model grid to MPI processes. The information
C     | is then stored in a common block for later use.
C     | Note: This routine can also be compiled with CPP
C     | directives set so that no multi-processing is initialised.
C     | This is OK and should work fine.
C     *==========================================================*
|
C     !USES:
C     === Global data ===
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
|
C     !LOCAL VARIABLES:
C     === Local variables ===
#ifdef ALLOW_USE_MPI
C     msgBuffer      :: IO buffer
C     myThid         :: Dummy thread id
C     mpiRC          :: Error code reporting variable used
C                       with MPI.
C     mpiGridSpec    :: No. of processes in X and Y.
C     mpiPeriodicity :: Flag indicating XY periodicity to MPI.
C     arrElSize      :: Size of an array element in bytes used
C                       to define MPI datatypes for communication
C                       operations.
C     arrElSep       :: Separation in units of array elements between
C                       blocks to be communicated.
C     elCount        :: No. of blocks that are associated with MPI
C                       datatype.
C     elLen          :: Length of an MPI datatype in terms of a
C                       preexisting datatype.
C     elStride       :: Distance between starting locations of
C                       elements in an MPI datatype - can be bytes
C                       or datatype units.
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER mpiGridSpec(2)
      INTEGER mpiPeriodicity(2)
      INTEGER arrElSize
      INTEGER arrElSep
      INTEGER elCount
      INTEGER elLen
      INTEGER elStride

C--   Variables needed for MPI gather/scatter routines.
      COMMON /GlobalLo/ mpi_myXGlobalLo, mpi_myYGlobalLo
      INTEGER mpi_myXGlobalLo(nPx*nPy)
      INTEGER mpi_myYGlobalLo(nPx*nPy)
      INTEGER npe,itemp,ierr,istatus(MPI_STATUS_SIZE)
      INTEGER mpiBufSize,mpiRequest
#endif /* ALLOW_USE_MPI */
      INTEGER myThid
CEOP

C--   Default values set to single processor case
C     pid[W-SE] are the MPI process id of the neighbor
C     processes. A process can be its own neighbor!
      pidE = 1
      pidN = 1
      pidS = 1
c     errorMessageUnit    = 0
c     standardMessageUnit = 6
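C     These single-process defaults stand when the code is built
C     without MPI; with ALLOW_USE_MPI defined they are overwritten
C     by the MPI initialisation below.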

#ifdef ALLOW_USE_MPI
C--
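C     mpiPx/mpiPy are this process's 0-based coordinates in the
C     process grid; each process owns nSx*sNx points in X and
C     nSy*sNy points in Y, so the assignments below give the
C     1-based global index of its first owned point.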
      mpiYGlobalLo = 1 + sNy*nSy*(mpiPy)
      myXGlobalLo = mpiXGlobalLo
      myYGlobalLo = mpiYGlobalLo

C--   To speed-up MPI gather and scatter routines, myXGlobalLo
C     and myYGlobalLo from each process are transferred to
C     a common block array. This allows process 0 to know
C     the location of the domains controlled by each process.
      mpiBufSize=1
      mpiRequest=0
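C     Exchange pattern: every process posts a non-blocking send of
C     its own value to each rank (message tag = sender id), then
C     blocks on a receive from each rank in turn. Note the
C     MPI_ISEND request handles are never completed with MPI_WAIT,
C     so strictly speaking they leak; a collective such as
C     MPI_ALLGATHER would express this exchange more directly.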
      DO npe = 0, numberOfProcs-1
         CALL MPI_ISEND (myXGlobalLo, mpiBufSize, MPI_INTEGER,
     &        npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
         CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &        npe, npe, MPI_COMM_MODEL, istatus, ierr)
         mpi_myXGlobalLo(npe+1) = itemp
      ENDDO
      DO npe = 0, numberOfProcs-1
         CALL MPI_ISEND (myYGlobalLo, mpiBufSize, MPI_INTEGER,
     &        npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
         CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &        npe, npe, MPI_COMM_MODEL, istatus, ierr)
         mpi_myYGlobalLo(npe+1) = itemp
      ENDDO

      myPx = mpiPx+1
      myPy = mpiPy+1

C--   Get MPI id for neighboring procs.
      elCount  = sNy+OLy*2
      elLen    = OLx
      elStride = arrElSep
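C     MPI_TYPE_VECTOR(count, blocklength, stride, oldtype, newtype,
C     ierr) describes elCount blocks of elLen contiguous elements
C     whose starts lie elStride elements apart: here, one OLx-wide
C     x-face overlap strip for each of the sNy+2*OLy rows of a tile.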
|
#ifdef TARGET_SGI
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL,
     &     mpiTypeXFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL4,
     &     mpiTypeXFaceBlock_xy_r4, mpiRC)
#endif
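C     The TARGET_SGI branch substitutes the generic MPI_REAL and
C     MPI_DOUBLE_PRECISION names for MPI_REAL4/MPI_REAL8,
C     presumably because the SGI MPI implementation did not define
C     the size-specific type names.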
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
      ENDIF
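C     On failure, eeBootError is recorded and execution continues,
C     so that the problem can be reported during start-up rather
C     than aborting mid-initialisation.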

C     xFace (y=constant) for XY arrays with real*8 declaration.
#ifdef TARGET_SGI
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_DOUBLE_PRECISION,
     &     mpiTypeXFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL8,
     &     mpiTypeXFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
C--
C     yFace (x=constant) for XY arrays with real*4 declaration
      elCount = OLy*(sNx+OLx*2)
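C     The y-face halo (OLy full rows of sNx+2*OLx points each) is
C     contiguous in memory, so MPI_TYPE_CONTIGUOUS(count, oldtype,
C     newtype, ierr) suffices and no stride is needed.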
#ifdef TARGET_SGI
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL,
     &     mpiTypeYFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL4,
     &     mpiTypeYFaceBlock_xy_r4, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XY arrays with real*8 declaration
#ifdef TARGET_SGI
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_DOUBLE_PRECISION,
     &     mpiTypeYFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL8,
     &     mpiTypeYFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')