/MITgcm/eesupp/src/ini_procs.F

Revision 1.22
Fri Feb 18 19:43:27 2005 UTC by ce107
Branch: MAIN
CVS Tags: checkpoint57m_post, checkpoint57g_pre, checkpoint57s_post, checkpoint57g_post, checkpoint57r_post, checkpoint57i_post, checkpoint57n_post, checkpoint57l_post, checkpoint57t_post, checkpoint57v_post, checkpoint57f_post, checkpoint57h_pre, checkpoint57h_post, checkpoint57e_post, checkpoint57p_post, checkpint57u_post, checkpoint57q_post, eckpoint57e_pre, checkpoint57h_done, checkpoint57j_post, checkpoint57f_pre, checkpoint57o_post, checkpoint57k_post, checkpoint57w_post
Changes since 1.21: +6 -6 lines
Added TARGET_LAM case to the TARGET_SGI and TARGET_AIX ones for the older
LAM MPI implementations that did not know of the MPI_REAL4 & MPI_REAL8
datatypes.

C $Header: /u/gcmpack/MITgcm/eesupp/src/ini_procs.F,v 1.21 2005/01/13 00:10:38 ce107 Exp $
C $Name:  $
#include "CPP_EEOPTIONS.h"
CBOP

C !ROUTINE: INI_PROCS

C !INTERFACE:
      SUBROUTINE INI_PROCS
      IMPLICIT NONE

C !DESCRIPTION:
C *==========================================================*
C | SUBROUTINE INI\_PROCS
C | o Initialise multiple concurrent processes environment.
C *==========================================================*
C | Under MPI this routine calls various MPI service routines
C | that map the model grid to MPI processes. The information
C | is then stored in a common block for later use.
C | Note: This routine can also be compiled with CPP
C | directives set so that no multi-processing is initialised.
C | This is OK and should work fine.
C *==========================================================*

C !USES:
C === Global data ===
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C !LOCAL VARIABLES:
C === Local variables ===
#ifdef ALLOW_USE_MPI
C msgBuffer      :: IO buffer
C myThid         :: Dummy thread id
C mpiRC          :: Error code reporting variable used
C                   with MPI.
C mpiGridSpec    :: No. of processes in X and Y.
C mpiPeriodicity :: Flag indicating XY periodicity to MPI.
C arrElSize      :: Size of an array element in bytes used
C                   to define MPI datatypes for communication
C                   operations.
C arrElSep       :: Separation in units of array elements between
C                   blocks to be communicated.
C elCount        :: No. of blocks that are associated with an MPI
C                   datatype.
C elLen          :: Length of an MPI datatype in terms of a preexisting
C                   datatype.
C elStride       :: Distance between starting locations of elements
C                   in an MPI datatype - can be bytes or datatype
C                   units.
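C
C     Editorial note (not part of the original header): arrElSep,
C     elCount, elLen and elStride map onto the arguments of
C     MPI_TYPE_VECTOR( count, blocklength, stride, oldtype,
C     newtype, ierror ): elCount blocks of elLen elements each,
C     with successive blocks starting elStride elements (or bytes,
C     for MPI_TYPE_HVECTOR) apart.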
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER mpiGridSpec(2)
      INTEGER mpiPeriodicity(2)
      INTEGER mpiLProcNam
      CHARACTER*(MPI_MAX_PROCESSOR_NAME) mpiProcNam
      INTEGER arrElSize
      INTEGER arrElSep
      INTEGER elCount
      INTEGER elLen
      INTEGER elStride
      INTEGER npe,itemp,ierr,istatus(MPI_STATUS_SIZE)
      INTEGER mpiBufSize,mpiRequest
#endif /* ALLOW_USE_MPI */
      INTEGER myThid

C--   Variables needed for mpi gather scatter routines.
      COMMON /GlobalLo/ mpi_myXGlobalLo, mpi_myYGlobalLo
      INTEGER mpi_myXGlobalLo(nPx*nPy)
      INTEGER mpi_myYGlobalLo(nPx*nPy)
CEOP

C--   Default values set to single processor case
C     pid[W-SE] are the MPI process ids of the neighbor
C     processes. A process can be its own neighbor!
      myThid      = 1
      myPid       = 1
      nProcs      = 1
      myPx        = 1
      myPy        = 1
      myXGlobalLo = 1
      myYGlobalLo = 1
      pidW        = 1
      pidE        = 1
      pidN        = 1
      pidS        = 1
      mpi_myXGlobalLo(1) = 1
      mpi_myYGlobalLo(1) = 1
c     errorMessageUnit    = 0
c     standardMessageUnit = 6

#ifdef ALLOW_USE_MPI
C--
C--   MPI style full multiple-process initialisation
C--   ==============================================
#ifndef ALWAYS_USE_MPI
      IF ( usingMPI ) THEN
#endif

C--   Arrange MPI processes on a cartesian grid
C     Set variable indicating which MPI process is to the north,
C     south, east, west, south-west, south-east, north-west
C     and north-east of me e.g.
C
C     Plan view of model domain centered on process ME
C     ================================================
C
C               :        :        :        :
C               :        :        :        :
C               :        :        :        :
C          .....--------------------------.....
C               |        |        |        |
C               |   NW   |   N    |   NE   |
C               |        |        |        |
C          .....--------------------------.....
C               |        |        |        |
C               |   W    |   ME   |   E    |
C               |        |        |        |
C          .....--------------------------.....
C               |        |        |        |
C               |   SW   |   S    |   SE   |
C               |        |        |        |
C          .....--------------------------.....
C               :        :        :        :
C        Y      :        :        :        :
C       / \     :        :        :        :
C        |
C        |
C        |----> X
C
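C     Editorial illustration (example values, not from the code): with
C     nPx=3, nPy=2 and no rank reordering, MPI_CART_CREATE numbers the
C     grid row-major, so coordinates (px,py) map to rank px*nPy+py;
C     since reorder is passed as _mpiTRUE_ below, the implementation
C     may renumber ranks to improve placement.
C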
C--   Set default MPI communicator to XY processor grid
      myThid = 1
      mpiGridSpec(1) = nPx
      mpiGridSpec(2) = nPy
C     Could be periodic in X and/or Y - set at run time or compile time!
      mpiPeriodicity(1) = _mpiTRUE_
      mpiPeriodicity(2) = _mpiTRUE_
#ifdef CAN_PREVENT_X_PERIODICITY
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      IF ( notUsingXPeriodicity ) THEN
#endif
       mpiPeriodicity(1) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_X_PERIODICITY */
#ifdef CAN_PREVENT_Y_PERIODICITY
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      IF ( notUsingYPeriodicity ) THEN
#endif
       mpiPeriodicity(2) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_Y_PERIODICITY */

      CALL MPI_CART_CREATE(
     I     MPI_COMM_MODEL,2,mpiGridSpec,mpiPeriodicity,_mpiTRUE_,
     O     mpiComm, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_CREATE return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C--   Get my location on the grid
      CALL MPI_CART_COORDS( mpiComm, mpiMyId, 2, mpiGridSpec, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_COORDS return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      myPid = mpiMyId
      mpiPx = mpiGridSpec(1)
      mpiPy = mpiGridSpec(2)
      mpiXGlobalLo = 1 + sNx*nSx*(mpiPx)
      mpiYGlobalLo = 1 + sNy*nSy*(mpiPy)
      myXGlobalLo = mpiXGlobalLo
      myYGlobalLo = mpiYGlobalLo

C--   To speed-up mpi gather and scatter routines, myXGlobalLo
C     and myYGlobalLo from each process are transferred to
C     a common block array. This allows process 0 to know
C     the location of the domains controlled by each process.
      mpiBufSize=1
      mpiRequest=0
      DO npe = 0, numberOfProcs-1
       CALL MPI_ISEND (myXGlobalLo, mpiBufSize, MPI_INTEGER,
     &      npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
       CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &      npe, npe, MPI_COMM_MODEL, istatus, ierr)
       mpi_myXGlobalLo(npe+1) = itemp
      ENDDO
      DO npe = 0, numberOfProcs-1
       CALL MPI_ISEND (myYGlobalLo, mpiBufSize, MPI_INTEGER,
     &      npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
       CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &      npe, npe, MPI_COMM_MODEL, istatus, ierr)
       mpi_myYGlobalLo(npe+1) = itemp
      ENDDO
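C     Editorial note (not part of this revision): the loops above reuse
C     a single request handle for every MPI_ISEND and never wait on or
C     free it; most MPI implementations tolerate this during start-up,
C     but the same exchange of one integer per process could be written
C     as a single collective, e.g.
C
C        CALL MPI_ALLGATHER( myXGlobalLo, 1, MPI_INTEGER,
C       &     mpi_myXGlobalLo, 1, MPI_INTEGER, MPI_COMM_MODEL, ierr )
C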

      myPx = mpiPx+1
      myPy = mpiPy+1
C--   Get MPI id for neighboring procs.
      mpiGridSpec(1) = mpiPx-1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     &     .AND. mpiGridSpec(1) .LT. 0 )
     &     mpiGridSpec(1) = nPx-1
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidW , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidW) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidW = mpiPidW
      mpiGridSpec(1) = mpiPx+1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     &     .AND. mpiGridSpec(1) .GT. nPx-1 )
     &     mpiGridSpec(1) = 0
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidE , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidE) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidE = mpiPidE
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy-1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     &     .AND. mpiGridSpec(2) .LT. 0 )
     &     mpiGridSpec(2) = nPy - 1
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidS , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidS) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidS = mpiPidS
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy+1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     &     .AND. mpiGridSpec(2) .GT. nPy-1 )
     &     mpiGridSpec(2) = 0
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidN , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidN) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidN = mpiPidN

C--   Print summary of processor mapping on standard output
      CALL MPI_GET_PROCESSOR_NAME( mpiProcNam, mpiLProcNam, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_GET_PROCESSOR_NAME return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      WRITE(msgBuffer,'(A)')
     & '======= Starting MPI parallel Run ========='
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_BOTH , myThid)
      WRITE(msgBuffer,'(A,A64)') ' My Processor Name = ',
     &     mpiProcNam(1:mpiLProcNam)
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I3,A,I3,A,I3,A,I3,A)') ' Located at (',
     &     mpiPx,',',mpiPy,
     &     ') on processor grid (0:',nPx-1,',0:',nPy-1,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4,A,I4,A,I4,A,I4,A)') ' Origin at (',
     &     mpiXGlobalLo,',',mpiYGlobalLo,
     &     ') on global grid (1:',nPx*sNx*nSx,',1:',nPy*sNy*nSy,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' North neighbor = processor ', mpiPidN
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' South neighbor = processor ', mpiPidS
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' East neighbor = processor ', mpiPidE
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' West neighbor = processor ', mpiPidW
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     &     SQUEEZE_RIGHT , myThid)
C
C--   Create MPI types for transfer of array edges.
C--   Four and eight byte primitive (one block only) datatypes.
C--   These are common to all threads in the process.
C     Notes:
C     ======
C     1. The datatypes MPI_REAL4 and MPI_REAL8 are usually predefined.
C        If they are not defined, code must be added to create them -
C        the MPI standard leaves it optional whether they exist.
C     2. Per-thread datatypes that handle all the edges for a thread
C        are defined based on the type defined here.
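C     Editorial illustration (myReal8 is a hypothetical handle, not
C     declared in this file): on an implementation lacking MPI_REAL8,
C     a type of equal extent for transfers could be built from the
C     default real, assuming a 4-byte MPI_REAL:
C
C        INTEGER myReal8
C        CALL MPI_TYPE_CONTIGUOUS( 2, MPI_REAL, myReal8, mpiRC )
C        CALL MPI_TYPE_COMMIT( myReal8, mpiRC )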
C--
C--   xFace datatypes (east<-->west messages)
C--
C     xFace (y=constant) for XY arrays with real*4 declaration.
      arrElSep = (sNx+OLx*2)
      elCount  = sNy+OLy*2
      elLen    = OLx
      elStride = arrElSep
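C     Editorial illustration (example values, not from the code): with
C     sNx=30, OLx=2, sNy=32, OLy=2 this describes elCount=36 blocks
C     (one per row, including overlaps), each elLen=OLx=2 elements
C     long, with successive blocks elStride=34 elements apart.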
#if (defined (TARGET_SGI) || defined (TARGET_AIX) || defined(TARGET_LAM))
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL,
     &     mpiTypeXFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL4,
     &     mpiTypeXFaceBlock_xy_r4, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XY arrays with real*8 declaration.
#if (defined (TARGET_SGI) || defined (TARGET_AIX) || defined(TARGET_LAM))
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_DOUBLE_PRECISION,
     &     mpiTypeXFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL8,
     &     mpiTypeXFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*4 declaration.
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
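C     Editorial note (not part of the original revision): the stride
C     of MPI_TYPE_HVECTOR is given in bytes, hence the arrElSize
C     factor; the resulting type repeats the 2-D xFace block once for
C     each of the Nr vertical levels.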
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     &     mpiTypeXFaceBlock_xy_r4,
     &     mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*8 declaration.
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     &     mpiTypeXFaceBlock_xy_r8,
     &     mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C--
C--   yFace datatypes (north<-->south messages)
C--
C     yFace (x=constant) for XY arrays with real*4 declaration
      elCount = OLy*(sNx+OLx*2)
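C     Editorial note (not part of the original revision): with Fortran
C     column-major storage the x index varies fastest, so the OLy
C     overlap rows of an edge form OLy*(sNx+OLx*2) consecutive
C     elements; a single MPI_TYPE_CONTIGUOUS therefore suffices, with
C     no stride needed.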
#if (defined (TARGET_SGI) || defined (TARGET_AIX) || defined(TARGET_LAM))
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL,
     &     mpiTypeYFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL4,
     &     mpiTypeYFaceBlock_xy_r4, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XY arrays with real*8 declaration
#if (defined (TARGET_SGI) || defined (TARGET_AIX) || defined(TARGET_LAM))
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_DOUBLE_PRECISION,
     &     mpiTypeYFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL8,
     &     mpiTypeYFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*4 declaration
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     &     mpiTypeYFaceBlock_xy_r4,
     &     mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*8 declaration
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     &     mpiTypeYFaceBlock_xy_r8,
     &     mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C--   Assign MPI values used in generating unique tags for messages.
      mpiTagW = 1
      mpiTagE = 2
      mpiTagS = 3
      mpiTagN = 4
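C     Editorial note (not part of the original revision): giving each
C     direction its own constant keeps the four edge exchanges of a
C     process pair distinct, so a receive for, say, the western edge
C     matches only messages posted with mpiTagW.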

C
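C     Editorial comment (not part of the original revision): the
C     barrier below keeps any process from leaving INI_PROCS and
C     starting edge exchanges before every process has committed its
C     datatypes.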
      CALL MPI_Barrier(MPI_COMM_MODEL,mpiRC)


C
#ifndef ALWAYS_USE_MPI
      ENDIF
#endif
#endif /* ALLOW_USE_MPI */

 999  CONTINUE

      RETURN
      END

C $Id: ini_procs.F,v 1.21 2005/01/13 00:10:38 ce107 Exp $
