C $Header: /u/gcmpack/MITgcm/pkg/flt/exch_recv_get_vec.F,v 1.8 2011/08/31 21:35:11 jmc Exp $
C $Name:  $

#include "CPP_EEOPTIONS.h"
#undef EXCH_USE_SPINNING
#undef DBUG_EXCH_VEC

C--   Contents
C--   o EXCH_RL_RECV_GET_VEC_X
C--   o EXCH_RL_RECV_GET_VEC_Y
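C--   Note: each S/R below is the receive half of a vector exchange ;
C--   in message mode it pairs with the corresponding send/put S/R
C--   (the "exch_send_put" S/R mentioned in the notes further down).
C--   Sketch of the expected calling sequence, with placeholder buffer
C--   names and an assumed send-side interface (check the actual
C--   argument list in exch_send_put_vec.F):
c      CALL EXCH_RL_SEND_PUT_VEC_X( sBufE, sBufW, ..., myd1, myThid )
c      CALL EXCH_RL_RECV_GET_VEC_X( rBufE, rBufW, myd1, myThid )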

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
CBOP 0
C !ROUTINE: EXCH_RL_RECV_GET_VEC_X

C !INTERFACE:
      SUBROUTINE EXCH_RL_RECV_GET_VEC_X(
     U                 arrayE, arrayW,
     I                 myd1, myThid )
C !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EXCH_RL_RECV_GET_VEC_X
C     | o "Receive" or "Get" X edges for RL array.
C     *==========================================================*
C     | Routine that invokes actual message passing receive
C     | of data to update buffer in X direction
C     *==========================================================*

C !USES:
      IMPLICIT NONE

C     == Global variables ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
#include "EXCH.h"

C !INPUT/OUTPUT PARAMETERS:
C     arrayE :: buffer array to collect Eastern Neighbour values
C     arrayW :: buffer array to collect Western Neighbour values
C     myd1   :: length of the first dimension of the buffer arrays
C     myThid :: my Thread Id. number
      INTEGER myd1
      _RL arrayE(myd1, nSx, nSy), arrayW(myd1, nSx, nSy)
      INTEGER myThid
CEOP

C !LOCAL VARIABLES:
C     bi, bj   :: tile indices
C     biW, bjW :: West tile indices
C     biE, bjE :: East tile indices
C     theProc  :: Variables used in message building
C     theTag   :: Variables used in message building
C     theType  :: Variables used in message building
C     theSize  :: Variables used in message building
C     westCommMode :: variables holding type of communication
C     eastCommMode ::  a particular tile face uses.
      INTEGER bi, bj
c     INTEGER biW, bjW, biE, bjE
      INTEGER westCommMode
      INTEGER eastCommMode
      INTEGER ioUnit
#ifdef EXCH_USE_SPINNING
      INTEGER spinCount
#endif
#ifdef ALLOW_USE_MPI
      INTEGER theProc, theTag, theType, theSize
      INTEGER mpiStatus(MPI_STATUS_SIZE,4), mpiRc
#endif

C--   Under a "put" scenario we
C--    i. set completion signal for buffer we put into.
C--   ii. wait for completion signal indicating data has been put in
C--       our buffer.
C--   Under a messaging mode we "receive" the message.
C--   Under a "get" scenario (not implemented) we
C--    i. Check that the data is ready.
C--   ii. Read the data.
C--  iii. Set data read flag + memory sync.

      ioUnit = errorMessageUnit

      _BEGIN_MASTER(myThid)

      DO bj=1,nSy
       DO bi=1,nSx
        westCommMode = _tileCommModeW(bi,bj)
        eastCommMode = _tileCommModeE(bi,bj)
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_X,0 :',myProcId,bi,bj
#endif
c       biE = _tileBiE(bi,bj)
c       bjE = _tileBjE(bi,bj)
c       biW = _tileBiW(bi,bj)
c       bjW = _tileBjW(bi,bj)
        IF ( westCommMode .EQ. COMM_MSG ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
         IF ( usingMPI ) THEN
#endif
          theProc = tilePidW(bi,bj)
          theTag  = _tileTagRecvW(bi,bj)
          theType = _MPI_TYPE_RL
          theSize = myd1
#ifdef DBUG_EXCH_VEC
          write(ioUnit,'(A,5I5,I8)') 'qq2xW: ',myProcId,bi,bj,
     &         theProc,theTag,theSize
#endif
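C         Blocking receive of the west-edge vector from the
C         western-neighbour process (message-passing mode)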
          CALL MPI_Recv( arrayW(1,bi,bj), theSize, theType,
     &                   theProc, theTag, MPI_COMM_MODEL,
     &                   mpiStatus, mpiRc )
c         if (theProc .eq. 0 .or. theProc .eq. 2) then
c          if (arrayW(1,bi,bj) .ne. 0.) then
c           write(errormessageunit,*) 'qq2y: ',myProcId,
c    &       theProc,theTag,theSize,(arrayW(i,bi,bj),i=1,32)
c          else
c           write(errormessageunit,*) 'qq2n: ',myProcId,
c    &       theProc,theTag,theSize,(arrayW(i,bi,bj),i=1,32)
c          endif
c         endif
#ifndef ALWAYS_USE_MPI
         ENDIF
#endif
#endif /* ALLOW_USE_MPI */
        ENDIF
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_X,1 :',myProcId,bi,bj
#endif
        IF ( eastCommMode .EQ. COMM_MSG ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
         IF ( usingMPI ) THEN
#endif
          theProc = tilePidE(bi,bj)
          theTag  = _tileTagRecvE(bi,bj)
          theType = _MPI_TYPE_RL
          theSize = myd1
#ifdef DBUG_EXCH_VEC
          write(ioUnit,'(A,5I5,I8)') 'qq2xE: ',myProcId,bi,bj,
     &         theProc,theTag,theSize
#endif
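C         Blocking receive of the east-edge vector from the
C         eastern-neighbour process (message-passing mode)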
          CALL MPI_Recv( arrayE(1,bi,bj), theSize, theType,
     &                   theProc, theTag, MPI_COMM_MODEL,
     &                   mpiStatus, mpiRc )
#ifndef ALWAYS_USE_MPI
         ENDIF
#endif
#endif /* ALLOW_USE_MPI */
        ENDIF
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_X,2 :',myProcId,bi,bj
#endif
       ENDDO
      ENDDO
#ifdef DBUG_EXCH_VEC
      write(ioUnit,'(A,5I6,I12)') 'RECV_X:',myProcId
#endif

      IF ( .NOT.exchUsesBarrier ) THEN
       DO bj=1,nSy
        DO bi=1,nSx
         IF ( exchNReqsX(1,bi,bj) .GT. 0 ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
          IF ( usingMPI ) THEN
#endif
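C          Complete the outstanding requests recorded for this tile
C          (presumably non-blocking sends posted by the matching
C          exch_send_put S/R)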
           CALL MPI_Waitall( exchNReqsX(1,bi,bj), exchReqIdX(1,1,bi,bj),
     &                       mpiStatus, mpiRC )
#ifndef ALWAYS_USE_MPI
          ENDIF
#endif
#endif /* ALLOW_USE_MPI */
         ENDIF
C        Clear outstanding requests counter
         exchNReqsX(1,bi,bj) = 0
        ENDDO
       ENDDO
      ENDIF

      _END_MASTER(myThid)

C--   need to sync threads after master has received data
C     (done after MPI_Waitall, in case the Waitall is really needed)
      _BARRIER

C--   Threads wait for buffers I am going to read to be ready.
C     note: BARRIER added in exch_send_put S/R and here above (message
C     mode) so that we no longer need this (undef EXCH_USE_SPINNING)
#ifdef EXCH_USE_SPINNING
      IF ( exchUsesBarrier ) THEN
C      o On some machines ( T90 ) use system barrier rather than spinning.
       CALL BARRIER( myThid )
      ELSE
C      o Spin waiting for completion flag. This avoids a global lock,
C        i.e., we only wait for the data we actually need.
       DO bj=myByLo(myThid),myByHi(myThid)
        DO bi=myBxLo(myThid),myBxHi(myThid)
         spinCount = 0
         westCommMode = _tileCommModeW(bi,bj)
         eastCommMode = _tileCommModeE(bi,bj)
#ifdef DBUG_EXCH_VEC
         write(ioUnit,'(A,5I6,I12)') 'spin:', myProcId,bi,bj,
     &        westRecvAck(1,bi,bj), eastRecvAck(1,bi,bj), spinCount
#endif
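C        Spin until both receive-acknowledgement flags have been
C        raised by the sending side, then consume (reset) them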
   10 CONTINUE
          CALL FOOL_THE_COMPILER( spinCount )
          spinCount = spinCount+1
#ifdef DBUG_EXCH_VEC
          write(ioUnit,'(A,5I6,I12)') 'spin:', myProcId,bi,bj,
     &        westRecvAck(1,bi,bj), eastRecvAck(1,bi,bj), spinCount
          IF ( myThid.EQ.1 .AND. spinCount.GT. _EXCH_SPIN_LIMIT ) THEN
           STOP ' S/R EXCH_RECV_GET_X: spinCount > _EXCH_SPIN_LIMIT'
          ENDIF
#endif
         IF ( westRecvAck(1,bi,bj) .EQ. 0 ) GOTO 10
         IF ( eastRecvAck(1,bi,bj) .EQ. 0 ) GOTO 10
C        Clear outstanding requests
         westRecvAck(1,bi,bj) = 0
         eastRecvAck(1,bi,bj) = 0
        ENDDO
       ENDDO
      ENDIF
#endif /* EXCH_USE_SPINNING */

      RETURN
      END

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
CBOP 0
C !ROUTINE: EXCH_RL_RECV_GET_VEC_Y

C !INTERFACE:
      SUBROUTINE EXCH_RL_RECV_GET_VEC_Y(
     U                 arrayN, arrayS,
     I                 myd1, myThid )
C !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EXCH_RL_RECV_GET_VEC_Y
C     | o "Receive" or "Get" Y edges for RL array.
C     *==========================================================*
C     | Routine that invokes actual message passing receive
C     | of data to update buffer in Y direction
C     *==========================================================*

C !USES:
      IMPLICIT NONE

C     == Global variables ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
#include "EXCH.h"

C !INPUT/OUTPUT PARAMETERS:
C     arrayN :: buffer array to collect Northern Neighbour values
C     arrayS :: buffer array to collect Southern Neighbour values
C     myd1   :: length of the first dimension of the buffer arrays
C     myThid :: my Thread Id. number
      INTEGER myd1
      _RL arrayN(myd1, nSx, nSy), arrayS(myd1, nSx, nSy)
      INTEGER myThid
CEOP

C !LOCAL VARIABLES:
C     bi, bj   :: tile indices
C     biS, bjS :: South tile indices
C     biN, bjN :: North tile indices
C     theProc  :: Variables used in message building
C     theTag   :: Variables used in message building
C     theType  :: Variables used in message building
C     theSize  :: Variables used in message building
C     southCommMode :: variables holding type of communication
C     northCommMode ::  a particular tile face uses.
      INTEGER bi, bj
c     INTEGER biS, bjS, biN, bjN
      INTEGER southCommMode
      INTEGER northCommMode
      INTEGER ioUnit
#ifdef EXCH_USE_SPINNING
      INTEGER spinCount
#endif
#ifdef ALLOW_USE_MPI
      INTEGER theProc, theTag, theType, theSize
      INTEGER mpiStatus(MPI_STATUS_SIZE,4), mpiRc
#endif

C--   Under a "put" scenario we
C--    i. set completion signal for buffer we put into.
C--   ii. wait for completion signal indicating data has been put in
C--       our buffer.
C--   Under a messaging mode we "receive" the message.
C--   Under a "get" scenario (not implemented) we
C--    i. Check that the data is ready.
C--   ii. Read the data.
C--  iii. Set data read flag + memory sync.

      ioUnit = errorMessageUnit

      _BEGIN_MASTER(myThid)

      DO bj=1,nSy
       DO bi=1,nSx
        southCommMode = _tileCommModeS(bi,bj)
        northCommMode = _tileCommModeN(bi,bj)
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_Y,0 :',myProcId,bi,bj
#endif
c       biN = _tileBiN(bi,bj)
c       bjN = _tileBjN(bi,bj)
c       biS = _tileBiS(bi,bj)
c       bjS = _tileBjS(bi,bj)
        IF ( southCommMode .EQ. COMM_MSG ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
         IF ( usingMPI ) THEN
#endif
          theProc = tilePidS(bi,bj)
          theTag  = _tileTagRecvS(bi,bj)
          theType = _MPI_TYPE_RL
          theSize = myd1
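C         Blocking receive of the south-edge vector from the
C         southern-neighbour process (message-passing mode)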
          CALL MPI_Recv( arrayS(1,bi,bj), theSize, theType,
     &                   theProc, theTag, MPI_COMM_MODEL,
     &                   mpiStatus, mpiRc )
#ifndef ALWAYS_USE_MPI
         ENDIF
#endif
#endif /* ALLOW_USE_MPI */
        ENDIF
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_Y,1 :',myProcId,bi,bj
#endif
        IF ( northCommMode .EQ. COMM_MSG ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
         IF ( usingMPI ) THEN
#endif
          theProc = tilePidN(bi,bj)
          theTag  = _tileTagRecvN(bi,bj)
          theType = _MPI_TYPE_RL
          theSize = myd1
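C         Blocking receive of the north-edge vector from the
C         northern-neighbour process (message-passing mode)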
          CALL MPI_Recv( arrayN(1,bi,bj), theSize, theType,
     &                   theProc, theTag, MPI_COMM_MODEL,
     &                   mpiStatus, mpiRc )
#ifndef ALWAYS_USE_MPI
         ENDIF
#endif
#endif /* ALLOW_USE_MPI */
        ENDIF
#ifdef DBUG_EXCH_VEC
        write(ioUnit,'(A,5I6)') 'RECV_Y,2 :',myProcId,bi,bj
#endif
       ENDDO
      ENDDO
#ifdef DBUG_EXCH_VEC
      write(ioUnit,'(A,5I6,I12)') 'RECV_Y:',myProcId
#endif

C--   Processes wait for buffers I am going to read to be ready.
      IF ( .NOT.exchUsesBarrier ) THEN
       DO bj=1,nSy
        DO bi=1,nSx
         IF ( exchNReqsY(1,bi,bj) .GT. 0 ) THEN
#ifdef ALLOW_USE_MPI
#ifndef ALWAYS_USE_MPI
          IF ( usingMPI ) THEN
#endif
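C          Complete the outstanding requests recorded for this tile
C          (presumably non-blocking sends posted by the matching
C          exch_send_put S/R)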
           CALL MPI_Waitall( exchNReqsY(1,bi,bj), exchReqIdY(1,1,bi,bj),
     &                       mpiStatus, mpiRC )
#ifndef ALWAYS_USE_MPI
          ENDIF
#endif
#endif /* ALLOW_USE_MPI */
         ENDIF
C        Clear outstanding requests counter
         exchNReqsY(1,bi,bj) = 0
        ENDDO
       ENDDO
      ENDIF

      _END_MASTER(myThid)

C--   need to sync threads after master has received data
C     (done after MPI_Waitall, in case the Waitall is really needed)
      _BARRIER

C--   Threads wait for buffers I am going to read to be ready.
C     note: BARRIER added in exch_send_put S/R and here above (message
C     mode) so that we no longer need this (undef EXCH_USE_SPINNING)
#ifdef EXCH_USE_SPINNING
      IF ( exchUsesBarrier ) THEN
C      o On some machines ( T90 ) use system barrier rather than spinning.
       CALL BARRIER( myThid )
      ELSE
C      o Spin waiting for completion flag. This avoids a global lock,
C        i.e., we only wait for the data we actually need.
       DO bj=myByLo(myThid),myByHi(myThid)
        DO bi=myBxLo(myThid),myBxHi(myThid)
         spinCount = 0
         southCommMode = _tileCommModeS(bi,bj)
         northCommMode = _tileCommModeN(bi,bj)
#ifdef DBUG_EXCH_VEC
         write(ioUnit,'(A,5I6,I12)') 'spin:', myProcId,bi,bj,
     &        southRecvAck(1,bi,bj), northRecvAck(1,bi,bj), spinCount
#endif
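C        Spin until both receive-acknowledgement flags have been
C        raised by the sending side, then consume (reset) them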
   10 CONTINUE
          CALL FOOL_THE_COMPILER( spinCount )
          spinCount = spinCount+1
#ifdef DBUG_EXCH_VEC
          write(ioUnit,'(A,5I6,I12)') 'spin:', myProcId,bi,bj,
     &        southRecvAck(1,bi,bj), northRecvAck(1,bi,bj), spinCount
          IF ( myThid.EQ.1 .AND. spinCount.GT. _EXCH_SPIN_LIMIT ) THEN
           STOP ' S/R EXCH_RECV_GET_Y: spinCount > _EXCH_SPIN_LIMIT'
          ENDIF
#endif
         IF ( southRecvAck(1,bi,bj) .EQ. 0 ) GOTO 10
         IF ( northRecvAck(1,bi,bj) .EQ. 0 ) GOTO 10
C        Clear outstanding requests
         southRecvAck(1,bi,bj) = 0
         northRecvAck(1,bi,bj) = 0
        ENDDO
       ENDDO
      ENDIF
#endif /* EXCH_USE_SPINNING */

      RETURN
      END