
Contents of /MITgcm/eesupp/src/ini_procs.F



Revision 1.16
Mon May 12 16:32:27 2003 UTC (20 years, 10 months ago) by adcroft
Branch: MAIN
CVS Tags: checkpoint51k_post, checkpoint50c_post, checkpoint52d_pre, checkpoint51o_pre, checkpoint51l_post, checkpoint51, checkpoint52, checkpoint50d_post, checkpoint51f_post, checkpoint51d_post, checkpoint51t_post, checkpoint51n_post, checkpoint51s_post, checkpoint51j_post, checkpoint51n_pre, checkpoint52b_pre, checkpoint51l_pre, checkpoint51q_post, checkpoint51b_pre, checkpoint52b_post, checkpoint52c_post, checkpoint51h_pre, checkpoint50f_post, checkpoint50f_pre, branchpoint-genmake2, checkpoint51r_post, checkpoint51i_post, checkpoint51b_post, checkpoint51c_post, checkpoint52d_post, checkpoint50g_post, checkpoint52a_pre, checkpoint50h_post, checkpoint50e_pre, checkpoint50i_post, checkpoint51i_pre, checkpoint50e_post, branch-netcdf, checkpoint50d_pre, checkpoint51e_post, checkpoint51o_post, checkpoint51f_pre, checkpoint52a_post, checkpoint51g_post, ecco_c52_e35, checkpoint51m_post, checkpoint51a_post, checkpoint51p_post, checkpoint51u_post
Branch point for: branch-genmake2, branch-nonh, tg2-branch, netcdf-sm0, checkpoint51n_branch
Changes since 1.15: +15 -8 lines
Fixed global gather/scatter operation added by Dimitri:
 o replaced (potentially) blocking sends with explicitly non-blocking sends
 o replaced constant arguments with variables
 o supplied missing arguments
 X Note: we don't check the return status and should...
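
For readers outside the MITgcm tree, the pattern behind the first bullet is sketched below: a minimal, self-contained ring exchange that is not MITgcm code and assumes only an MPI-1 library and its 'mpif.h' header. Unlike the revision above, it also completes the send with MPI_WAIT and checks the return code, the step the note says is still missing here.

      PROGRAM NBDEMO
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER myId, nProcs, ierr, req
      INTEGER istatus(MPI_STATUS_SIZE)
      INTEGER sendVal, recvVal, left, right
      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, myId, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, nProcs, ierr )
C     Each rank passes one integer to its right-hand neighbor.
      right = MOD( myId+1, nProcs )
      left  = MOD( myId+nProcs-1, nProcs )
      sendVal = myId
C     Non-blocking send: returns at once instead of (potentially)
C     blocking until the matching receive has been posted.
      CALL MPI_ISEND( sendVal, 1, MPI_INTEGER, right, 0,
     &                MPI_COMM_WORLD, req, ierr )
      CALL MPI_RECV( recvVal, 1, MPI_INTEGER, left, 0,
     &               MPI_COMM_WORLD, istatus, ierr )
C     Complete the send before sendVal may be reused, and check
C     the return status (the step the log says is still missing).
      CALL MPI_WAIT( req, istatus, ierr )
      IF ( ierr .NE. MPI_SUCCESS ) STOP 'MPI_WAIT failed'
      CALL MPI_FINALIZE( ierr )
      END

Built and launched in the usual way (e.g. an mpif77-style wrapper and mpirun), every rank receives its left neighbor's id without the deadlock risk that a cycle of matching blocking MPI_SEND calls can carry.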

C $Header: /u/gcmpack/MITgcm/eesupp/src/ini_procs.F,v 1.15 2003/02/18 05:33:53 dimitri Exp $
C $Name: $
#include "CPP_EEOPTIONS.h"
CBOP

C !ROUTINE: INI_PROCS

C !INTERFACE:
      SUBROUTINE INI_PROCS
      IMPLICIT NONE

C !DESCRIPTION:
C *==========================================================*
C | SUBROUTINE INI_PROCS
C | o Initialise multiple concurrent processes environment.
C *==========================================================*
C | Under MPI this routine calls various MPI service routines
C | that map the model grid to MPI processes. The information
C | is then stored in a common block for later use.
C | Note: This routine can also be compiled with CPP
C | directives set so that no multi-processing is initialised.
C | This is OK and should work fine.
C *==========================================================*

C !USES:
C === Global data ===
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C !LOCAL VARIABLES:
C === Local variables ===
#ifdef ALLOW_USE_MPI
C msgBuffer      :: IO buffer
C myThid         :: Dummy thread id
C mpiRC          :: Error code reporting variable used
C                   with MPI.
C mpiGridSpec    :: No. of processes in X and Y.
C mpiPeriodicity :: Flag indicating XY periodicity to MPI.
C arrElSize      :: Size of an array element in bytes, used
C                   to define MPI datatypes for communication
C                   operations.
C arrElSep       :: Separation in units of array elements between
C                   blocks to be communicated.
C elCount        :: No. of blocks that are associated with an MPI
C                   datatype.
C elLen          :: Length of an MPI datatype in terms of a
C                   preexisting datatype.
C elStride       :: Distance between starting locations of elements
C                   in an MPI datatype - can be in bytes or in
C                   datatype units.
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER mpiGridSpec(2)
      INTEGER mpiPeriodicity(2)
      INTEGER mpiLProcNam
      CHARACTER*(MPI_MAX_PROCESSOR_NAME) mpiProcNam
      INTEGER arrElSize
      INTEGER arrElSep
      INTEGER elCount
      INTEGER elLen
      INTEGER elStride

C--   Variables needed for mpi gather scatter routines.
      COMMON /GlobalLo/ mpi_myXGlobalLo, mpi_myYGlobalLo
      INTEGER mpi_myXGlobalLo(nPx*nPy)
      INTEGER mpi_myYGlobalLo(nPx*nPy)
      INTEGER npe,itemp,ierr,istatus(MPI_STATUS_SIZE)
      INTEGER mpiBufSize,mpiRequest

#endif /* ALLOW_USE_MPI */
      INTEGER myThid
CEOP

C--   Default values set to single processor case
C     pid[W-SE] are the MPI process id of the neighbor
C     processes. A process can be its own neighbor!
      myThid = 1
      myPid  = 1
      nProcs = 1
      myPx   = 1
      myPy   = 1
      myXGlobalLo = 1
      myYGlobalLo = 1
      pidW   = 1
      pidE   = 1
      pidN   = 1
      pidS   = 1
      errorMessageUnit    = 0
      standardMessageUnit = 6

#ifdef ALLOW_USE_MPI
C--
C--   MPI style full multiple-process initialisation
C--   ==============================================
#ifndef ALWAYS_USE_MPI
      IF ( usingMPI ) THEN
#endif

C--   Arrange MPI processes on a cartesian grid
C     Set variable indicating which MPI process is to the north,
C     south, east, west, south-west, south-east, north-west
C     and north-east of me e.g.
C
C     Plan view of model domain centered on process ME
C     ================================================
C
C             :     :     :     :
C             :     :     :     :
C             :     :     :     :
C        .....-------------------.....
C             |     |     |     |
C             |  NW |  N  |  NE |
C             |     |     |     |
C        .....-------------------.....
C             |     |     |     |
C             |  W  |  ME |  E  |
C             |     |     |     |
C        .....-------------------.....
C             |     |     |     |
C             |  SW |  S  |  SE |
C             |     |     |     |
C        .....-------------------.....
C             :     :     :     :
C       Y     :     :     :     :
C      /|\    :     :     :     :
C       |
C       |
C       |----> X
C
C--   Set default MPI communicator to XY processor grid
      myThid = 1
      mpiGridSpec(1) = nPx
      mpiGridSpec(2) = nPy
C     Could be periodic in X and/or Y - set at run time or compile time!
      mpiPeriodicity(1) = _mpiTRUE_
      mpiPeriodicity(2) = _mpiTRUE_
#ifdef CAN_PREVENT_X_PERIODICITY
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      IF ( notUsingXPeriodicity ) THEN
#endif
       mpiPeriodicity(1) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_X_PERIODICITY */
#ifdef CAN_PREVENT_Y_PERIODICITY
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      IF ( notUsingYPeriodicity ) THEN
#endif
       mpiPeriodicity(2) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_Y_PERIODICITY */

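C     MPI_CART_CREATE embeds the nPx-by-nPy process grid in a 2-D
C     cartesian topology with the periodicity chosen above and
C     returns a new communicator, mpiComm; the _mpiTRUE_ reorder
C     argument allows the MPI library to renumber ranks to suit
C     the hardware.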
      CALL MPI_CART_CREATE(
     I MPI_COMM_MODEL,2,mpiGridSpec,mpiPeriodicity,_mpiTRUE_,
     O mpiComm, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_CREATE return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C--   Get my location on the grid
      CALL MPI_CART_COORDS( mpiComm, mpiMyId, 2, mpiGridSpec, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_COORDS return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      myPid = mpiMyId
      mpiPx = mpiGridSpec(1)
      mpiPy = mpiGridSpec(2)
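C     Each process owns an sNx*nSx by sNy*nSy patch of the global
C     grid, so its lower-left global index follows directly from
C     its 0-based cartesian coordinates (mpiPx,mpiPy):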
      mpiXGlobalLo = 1 + sNx*nSx*(mpiPx)
      mpiYGlobalLo = 1 + sNy*nSy*(mpiPy)
      myXGlobalLo  = mpiXGlobalLo
      myYGlobalLo  = mpiYGlobalLo

C--   To speed-up mpi gather and scatter routines, myXGlobalLo
C     and myYGlobalLo from each process are transferred to
C     a common block array. This allows process 0 to know
C     the location of the domains controlled by each process.
      DO npe = 0, numberOfProcs-1
       mpiBufSize=1
       mpiRequest=0
       CALL MPI_ISEND (myXGlobalLo, mpiBufSize, MPI_INTEGER,
     &      npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
       mpiBufSize=1
       CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &      npe, npe, MPI_COMM_MODEL, istatus, ierr)
       mpi_myXGlobalLo(npe+1) = itemp
      ENDDO
      DO npe = 0, numberOfProcs-1
       mpiBufSize=1
       mpiRequest=0
       CALL MPI_ISEND (myYGlobalLo, mpiBufSize, MPI_INTEGER,
     &      npe, mpiMyId, MPI_COMM_MODEL, mpiRequest, ierr)
      ENDDO
      DO npe = 0, numberOfProcs-1
       mpiBufSize=1
       CALL MPI_RECV (itemp, mpiBufSize, MPI_INTEGER,
     &      npe, npe, MPI_COMM_MODEL, istatus, ierr)
       mpi_myYGlobalLo(npe+1) = itemp
      ENDDO
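C     Note: as the revision log remarks, the MPI_ISEND requests
C     above are never completed with MPI_WAIT and the ierr return
C     codes are not checked; a more defensive version would do both.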

      myPx = mpiPx+1
      myPy = mpiPy+1
C--   Get MPI id for neighboring procs.
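C     For periodic dimensions MPI_CART_RANK would wrap out-of-range
C     coordinates itself; the explicit wrap-around below simply
C     makes the intended neighbor unambiguous.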
      mpiGridSpec(1) = mpiPx-1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(1) .LT. 0 )
     & mpiGridSpec(1) = nPx-1
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidW , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidW) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidW = mpiPidW
      mpiGridSpec(1) = mpiPx+1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(1) .GT. nPx-1 )
     & mpiGridSpec(1) = 0
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidE , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidE) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidE = mpiPidE
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy-1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(2) .LT. 0 )
     & mpiGridSpec(2) = nPy - 1
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidS , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidS) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidS = mpiPidS
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy+1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(2) .GT. nPy-1 )
     & mpiGridSpec(2) = 0
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidN , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidN) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidN = mpiPidN

C--   Print summary of processor mapping on standard output
      CALL MPI_GET_PROCESSOR_NAME( mpiProcNam, mpiLProcNam, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_GET_PROCESSOR_NAME return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      WRITE(msgBuffer,'(A)')
     & '======= Starting MPI parallel Run ========='
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_BOTH , myThid)
      WRITE(msgBuffer,'(A,A64)') ' My Processor Name = ',
     & mpiProcNam(1:mpiLProcNam)
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I3,A,I3,A,I3,A,I3,A)') ' Located at (',
     & mpiPx,',',mpiPy,
     & ') on processor grid (0:',nPx-1,',0:',nPy-1,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4,A,I4,A,I4,A,I4,A)') ' Origin at (',
     & mpiXGlobalLo,',',mpiYGlobalLo,
     & ') on global grid (1:',nPx*sNx*nSx,',1:',nPy*sNy*nSy,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' North neighbor = processor ', mpiPidN
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' South neighbor = processor ', mpiPidS
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' East neighbor = processor ', mpiPidE
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' West neighbor = processor ', mpiPidW
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
C
C--   Create MPI types for transfer of array edges.
C--   Four and eight byte primitive (one block only) datatypes.
C--   These are common to all threads in the process.
C     Notes:
C     ======
C     1. The datatypes MPI_REAL4 and MPI_REAL8 are usually predefined.
C        If they are not defined, code must be added to create them -
C        the MPI standard leaves it optional whether they exist.
C     2. Per-thread datatypes that handle all the edges for a thread
C        are defined based on the type defined here.
C--
C--   xFace datatypes (east<-->west messages)
C--
C     xFace (y=constant) for XY arrays with real*4 declaration.
      arrElSep = (sNx+OLx*2)
      elCount  = sNy+OLy*2
      elLen    = OLx
      elStride = arrElSep
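C     The vector type built here selects an OLx-wide strip from an
C     XY array: elCount = sNy+2*OLy blocks of elLen = OLx elements,
C     with successive blocks starting one array row
C     (elStride = sNx+2*OLx elements) apart.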
#ifdef TARGET_SGI
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL,
     & mpiTypeXFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL4,
     & mpiTypeXFaceBlock_xy_r4, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XY arrays with real*8 declaration.
#ifdef TARGET_SGI
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_DOUBLE_PRECISION,
     & mpiTypeXFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL8,
     & mpiTypeXFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*4 declaration.
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
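C     MPI_TYPE_HVECTOR takes its stride in bytes (unlike
C     MPI_TYPE_VECTOR, which counts in datatype units), hence the
C     arrElSize factor: each of the Nr levels begins one full XY
C     plane, arrElSize*arrElSep bytes, after the previous one.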
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeXFaceBlock_xy_r4,
     & mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*8 declaration.
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeXFaceBlock_xy_r8,
     & mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C--
C--   yFace datatypes (north<-->south messages)
C--
C     yFace (x=constant) for XY arrays with real*4 declaration
      elCount = OLy*(sNx+OLx*2)
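C     A yFace edge is OLy complete rows of the XY array; these sit
C     contiguously in memory, so MPI_TYPE_CONTIGUOUS with
C     OLy*(sNx+2*OLx) elements suffices and no stride is needed.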
#ifdef TARGET_SGI
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL,
     & mpiTypeYFaceBlock_xy_r4, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL4,
     & mpiTypeYFaceBlock_xy_r4, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XY arrays with real*8 declaration
#ifdef TARGET_SGI
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_DOUBLE_PRECISION,
     & mpiTypeYFaceBlock_xy_r8, mpiRC)
#else
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL8,
     & mpiTypeYFaceBlock_xy_r8, mpiRC)
#endif
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*4 declaration
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeYFaceBlock_xy_r4,
     & mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*8 declaration
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeYFaceBlock_xy_r8,
     & mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
      ENDIF

C--   Assign MPI values used in generating unique tags for messages.
      mpiTagW = 1
      mpiTagE = 2
      mpiTagS = 3
      mpiTagN = 4

C
      CALL MPI_Barrier(MPI_COMM_MODEL,mpiRC)


C
#ifndef ALWAYS_USE_MPI
      ENDIF
#endif
#endif /* ALLOW_USE_MPI */

  999 CONTINUE

      RETURN
      END

C $Id: ini_procs.F,v 1.15 2003/02/18 05:33:53 dimitri Exp $
