C $Header: $
C $Name: $

#include "CPP_EEOPTIONS.h"

CStartOfInterface
      SUBROUTINE INI_PROCS
C     /==========================================================\
C     | SUBROUTINE INI_PROCS                                     |
C     | o Initialise multiple concurrent processes environment.  |
C     |==========================================================|
C     | Under MPI this routine calls various MPI service routines|
C     | that map the model grid to MPI processes. The information|
C     | is then stored in a common block for later use.          |
C     | Note: This routine can also be compiled with CPP         |
C     | directives set so that no multi-processing is            |
C     | initialised. This is OK and should work fine.            |
C     \==========================================================/
      IMPLICIT NONE

C     === Global data ===
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
CEndOfInterface

C     === Local variables ===
#ifdef ALLOW_USE_MPI
C     msgBuffer      - IO buffer for building error/status messages.
C     mpiRC          - Error code reporting variable used with MPI.
C     mpiGridSpec    - No. of processes in X and Y; also reused below
C                      as a scratch coordinate pair for rank lookups.
C     mpiPeriodicity - Flag indicating XY periodicity to MPI.
C     mpiLProcNam    - Length of this process' MPI processor name.
C     mpiProcNam     - This process' MPI processor name.
C     arrElSize      - Size of an array element in bytes, used
C                      to define MPI datatypes for communication
C                      operations.
C     arrElSep       - Separation in units of array elements between
C                      blocks to be communicated.
C     elCount        - No. of blocks that are associated with an MPI
C                      datatype.
C     elLen          - Length of an MPI datatype in terms of a
C                      preexisting datatype.
C     elStride       - Distance between starting locations of elements
C                      in an MPI datatype - can be in bytes or in
C                      datatype units.
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER mpiGridSpec(2)
      INTEGER mpiPeriodicity(2)
      INTEGER mpiLProcNam
      CHARACTER*(MPI_MAX_PROCESSOR_NAME) mpiProcNam
      INTEGER arrElSize
      INTEGER arrElSep
      INTEGER elCount
      INTEGER elLen
      INTEGER elStride
#endif /* ALLOW_USE_MPI */
C     myThid         - Dummy thread id. This routine is called before
C                      threading starts, so thread 1 stands for all.
      INTEGER myThid

C--   Default values set to single processor case.
C     pid[W-SE] are the MPI process ids of the neighbor
C     processes. A process can be its own neighbor!
      myThid      = 1
      myPid       = 1
      nProcs      = 1
      myPx        = 1
      myPy        = 1
      myXGlobalLo = 1
      myYGlobalLo = 1
      pidW        = 1
      pidE        = 1
      pidN        = 1
      pidS        = 1
      errorMessageUnit    = 0
      standardMessageUnit = 6

#ifdef ALLOW_USE_MPI
C--
C--   MPI style full multiple-process initialisation
C--   ==============================================
#ifndef ALWAYS_USE_MPI
      IF ( usingMPI ) THEN
#endif

C--   Arrange MPI processes on a cartesian grid
C     Set variable indicating which MPI process is to the north,
C     south, east, west, south-west, south-east, north-west
C     and north-east of me e.g.
C
C     Plan view of model domain centered on process ME
C     ================================================
C
C     :     :     :     :
C     :     :     :     :
C     :     :     :     :
C     .....-----------------.....
C     |     |     |     |
C     |  NW |  N  |  NE |
C     |     |     |     |
C     .....-----------------.....
C     |     |     |     |
C     |  W  |  ME |  E  |
C     |     |     |     |
C     .....-----------------.....
C     |     |     |     |
C     |  SW |  S  |  SE |
C     |     |     |     |
C     .....-----------------.....
C     :     :     :     :
C  Y  :     :     :     :
C / \ :     :     :     :
C  |
C  |
C  |----> X
C
C--   Set default MPI communicator to XY processor grid
      myThid = 1
      mpiGridSpec(1) = nPx
      mpiGridSpec(2) = nPy
C     Could be periodic in X and/or Y - set at run time or compile time!
      mpiPeriodicity(1) = _mpiTRUE_
      mpiPeriodicity(2) = _mpiTRUE_
#ifdef CAN_PREVENT_X_PERIODICITY
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      IF ( notUsingXPeriodicity ) THEN
#endif
       mpiPeriodicity(1) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_X_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_X_PERIODICITY */
#ifdef CAN_PREVENT_Y_PERIODICITY
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      IF ( notUsingYPeriodicity ) THEN
#endif
       mpiPeriodicity(2) = _mpiFALSE_
#ifndef ALWAYS_PREVENT_Y_PERIODICITY
      ENDIF
#endif
#endif /* CAN_PREVENT_Y_PERIODICITY */

C--   Create the cartesian communicator (with rank reordering allowed).
      CALL MPI_CART_CREATE(
     I MPI_COMM_MODEL,2,mpiGridSpec,mpiPeriodicity,_mpiTRUE_,
     O mpiComm, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_CREATE return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C--   Get my location on the grid
      CALL MPI_CART_COORDS( mpiComm, mpiMyId, 2, mpiGridSpec, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_COORDS return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      myPid = mpiMyId
C     mpiPx/mpiPy are 0-based cartesian coordinates; myPx/myPy 1-based.
      mpiPx = mpiGridSpec(1)
      mpiPy = mpiGridSpec(2)
      mpiXGlobalLo = 1 + sNx*nSx*(mpiPx)
      mpiYGlobalLo = 1 + sNy*nSy*(mpiPy)
      myXGlobalLo = mpiXGlobalLo
      myYGlobalLo = mpiYGlobalLo
      myPx = mpiPx+1
      myPy = mpiPy+1
C--   Get MPI id for neighboring procs.
C     West neighbor (wrap around in X under periodicity).
      mpiGridSpec(1) = mpiPx-1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(1) .LT. 0 )
     &  mpiGridSpec(1) = nPx-1
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidW , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidW) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidW = mpiPidW
C     East neighbor (wrap around in X under periodicity).
      mpiGridSpec(1) = mpiPx+1
      IF ( mpiPeriodicity(1) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(1) .GT. nPx-1 )
     &  mpiGridSpec(1) = 0
      mpiGridSpec(2) = mpiPy
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidE , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidE) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidE = mpiPidE
C     South neighbor (wrap around in Y under periodicity).
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy-1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(2) .LT. 0 )
     &  mpiGridSpec(2) = nPy - 1
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidS , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidS) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidS = mpiPidS
C     North neighbor (wrap around in Y under periodicity).
      mpiGridSpec(1) = mpiPx
      mpiGridSpec(2) = mpiPy+1
      IF ( mpiPeriodicity(2) .EQ. _mpiTRUE_
     & .AND. mpiGridSpec(2) .GT. nPy-1 )
     &  mpiGridSpec(2) = 0
      CALL MPI_CART_RANK( mpiComm, mpiGridSpec, mpiPidN , mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_CART_RANK (pidN) return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      pidN = mpiPidN

C--   Print summary of processor mapping on standard output
      CALL MPI_GET_PROCESSOR_NAME( mpiProcNam, mpiLProcNam, mpiRC )
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_GET_PROCESSOR_NAME return code',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      WRITE(msgBuffer,'(A)')
     & '======= Starting MPI parallel Run ========='
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_BOTH , myThid)
      WRITE(msgBuffer,'(A,A64)') ' My Processor Name = ',
     & mpiProcNam(1:mpiLProcNam)
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I3,A,I3,A,I3,A,I3,A)') ' Located at (',
     & mpiPx,',',mpiPy,
     & ') on processor grid (0:',nPx-1,',0:',nPy-1,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4,A,I4,A,I4,A,I4,A)') ' Origin at (',
     & mpiXGlobalLo,',',mpiYGlobalLo,
     & ') on global grid (1:',nPx*sNx*nSx,',1:',nPy*sNy*nSy,')'
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' North neighbor = processor ', mpiPidN
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' South neighbor = processor ', mpiPidS
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' East neighbor = processor ', mpiPidE
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
      WRITE(msgBuffer,'(A,I4.4)')
     & ' West neighbor = processor ', mpiPidW
      CALL PRINT_MESSAGE( msgBuffer, standardMessageUnit,
     & SQUEEZE_RIGHT , myThid)
C
C--   Create MPI types for transfer of array edges.
C--   Four and eight byte primitive (one block only) datatypes.
C--   These are common to all threads in the process.
C     Notes:
C     ======
C     1. The datatypes MPI_REAL4 and MPI_REAL8 are usually predefined.
C        If they are not defined code must be added to create them -
C        the MPI standard leaves optional whether they exist.
C     2. Per thread datatypes that handle all the edges for a thread
C        are defined based on the type defined here.
C     On any failure below eeBootError is set and we jump to 999:
C     committing a datatype whose creation failed would use an
C     undefined handle.
C--
C--   xFace datatypes (east<-->west messages)
C--
C     xFace (y=constant) for XY arrays with real*4 declaration.
      arrElSep = (sNx+OLx*2)
      elCount  = sNy+OLy*2
      elLen    = OLx
      elStride = arrElSep
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL4,
     & mpiTypeXFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C     xFace (y=constant) for XY arrays with real*8 declaration.
      CALL MPI_TYPE_VECTOR(elCount,elLen,elStride,MPI_REAL8,
     & mpiTypeXFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_VECTOR (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*4 declaration.
C     Built by stacking Nr copies of the XY face type at a byte
C     stride of one full XY plane (hence MPI_TYPE_HVECTOR).
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeXFaceBlock_xy_r4,
     & mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C     xFace (y=constant) for XYZ arrays with real*8 declaration.
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeXFaceBlock_xy_r8,
     & mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeXFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeXFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
C--
C--   yFace datatypes (north<-->south messages)
C--
C     yFace (x=constant) for XY arrays with real*4 declaration.
C     A yFace edge is contiguous in memory: OLy full rows.
      elCount = OLy*(sNx+OLx*2)
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL4,
     & mpiTypeYFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
C     yFace (x=constant) for XY arrays with real*8 declaration.
      CALL MPI_TYPE_CONTIGUOUS(elCount,MPI_REAL8,
     & mpiTypeYFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_CONTIGUOUS (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xy_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xy_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*4 declaration.
      arrElSize = 4
      arrElSep  = (sNx+OLx*2)*(sNy+OLy*2)
      elCount   = Nr
      elLen     = 1
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeYFaceBlock_xy_r4,
     & mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r4, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r4)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
C     yFace (x=constant) for XYZ arrays with real*8 declaration.
      arrElSize = 8
      elStride  = arrElSize*arrElSep
      CALL MPI_TYPE_HVECTOR(elCount,elLen,elStride,
     & mpiTypeYFaceBlock_xy_r8,
     & mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_HVECTOR (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF
      CALL MPI_TYPE_COMMIT( mpiTypeYFaceBlock_xyz_r8, mpiRC)
      IF ( mpiRC .NE. MPI_SUCCESS ) THEN
       eeBootError = .TRUE.
       WRITE(msgBuffer,'(A,I5)')
     & 'S/R INI_PROCS: MPI_TYPE_COMMIT (mpiTypeYFaceBlock_xyz_r8)',
     & mpiRC
       CALL PRINT_ERROR( msgBuffer , myThid)
       GOTO 999
      ENDIF

C--   Assign MPI values used in generating unique tags for messages.
      mpiTagW = 1
      mpiTagE = 2
      mpiTagS = 3
      mpiTagN = 4

C--   Synchronise all processes before leaving boot phase.
      CALL MPI_Barrier(MPI_COMM_MODEL,mpiRC)

C
#ifndef ALWAYS_USE_MPI
      ENDIF
#endif
#endif /* ALLOW_USE_MPI */

C     Error exit point: eeBootError has been set and reported.
  999 CONTINUE

      RETURN
      END

C $Id: ini_procs.F,v 1.11 1999/05/24 15:19:53 adcroft Exp $