C $Header: /u/gcmpack/MITgcm/eesupp/src/gather_2d_wh_rx.template,v 1.1 2010/09/23 05:32:17 gforget Exp $
C $Name: $

#include "PACKAGES_CONFIG.h"
#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: GATHER_2D_WH_RX
C     !INTERFACE:
      SUBROUTINE GATHER_2D_WH_RX(
     O                  gloBuff,
     I                  procBuff,
     I                  myThid )
C     !DESCRIPTION:
C     Gather elements, including halos, of a global 2-D array from
C     all MPI processes to process 0. Each process contributes a
C     (sNxWh x procNyWh) slab; slabs are stacked along the second
C     dimension of gloBuff in rank order, i.e. rank np-1 fills rows
C     (np-1)*procNyWh+1 ... np*procNyWh.
C     Note: done by Master-Thread ; might need barrier calls before
C     and after this S/R call.

C     !USES:
      IMPLICIT NONE
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C     !INPUT/OUTPUT PARAMETERS:
C     gloBuff  ( _RX ) :: full-domain 2D IO-buffer array     (Output)
C     procBuff ( _RX ) :: proc-domain 2D IO-buffer array     (Input)
C     myThid (integer) :: my Thread Id number

C     sNxWh    :: x tile size with halo included
C     sNyWh    :: y tile size with halo included
C     procNyWh :: processor sum of sNyWh
C     gloNyWh  :: global sum of sNyWh
      INTEGER sNxWh
      INTEGER sNyWh
      INTEGER procNyWh
      INTEGER gloNyWh
      PARAMETER ( sNxWh = sNx+2*Olx )
      PARAMETER ( sNyWh = sNy+2*Oly )
      PARAMETER ( procNyWh = sNyWh*nSy*nSx )
      PARAMETER ( gloNyWh = procNyWh*nPy*nPx )

      _RX gloBuff(sNxWh,gloNyWh)
      _RX procBuff(sNxWh,procNyWh)
      INTEGER myThid
CEOP

C     !LOCAL VARIABLES:
C     i,j  :: loop indices within one proc-domain slab
C     jj   :: destination row in gloBuff for remote data
C     np   :: 1-based process counter ; np0 :: 0-based MPI rank
C     temp :: receive buffer (only first procNyWh rows are used)
      INTEGER i,j
#ifdef ALLOW_USE_MPI
      INTEGER jj, np, np0
      _RX temp(sNxWh,gloNyWh)
      INTEGER istatus(MPI_STATUS_SIZE), ierr
      INTEGER lbuff, idest, itag, ready_to_receive
#endif /* ALLOW_USE_MPI */

      _BEGIN_MASTER( myThid )

      IF( myProcId .EQ. 0 ) THEN
C--   Process 0 fills-in its local data

c       DO j=1,gloNyWh
c        DO i=1,sNxWh
c         gloBuff(i,j) = 0.
c        ENDDO
c       ENDDO

        DO j=1,procNyWh
         DO i=1,sNxWh
          gloBuff(i,j) = procBuff(i,j)
         ENDDO
        ENDDO

C-    end if myProcId = 0
      ENDIF

#ifdef ALLOW_USE_MPI

      lbuff = sNxWh*procNyWh
      idest = 0
      itag  = 0
      ready_to_receive = 0

      IF( mpiMyId .EQ. 0 ) THEN

C--   Process 0 polls and receives data from each process in turn
       DO np = 2, numberOfProcs
        np0 = np - 1
#ifndef DISABLE_MPI_READY_TO_RECEIVE
C-    hand-shake: tell rank np0 we are ready to receive its slab
        CALL MPI_SEND (ready_to_receive, 1, MPI_INTEGER,
     &       np0, itag, MPI_COMM_MODEL, ierr)
#endif
        CALL MPI_RECV (temp, lbuff, _MPI_TYPE_RX,
     &       np0, itag, MPI_COMM_MODEL, istatus, ierr)

C-    copy received slab into its slot of the global buffer
C     (jj is invariant in i : computed once per j, not per element)
        DO j=1,procNyWh
         jj=j+procNyWh*(np-1)
         DO i=1,sNxWh
          gloBuff(i,jj) = temp(i,j)
         ENDDO
        ENDDO
C-    end loop on np
       ENDDO

      ELSE

C--   All processes except 0 wait to be polled then send local array
#ifndef DISABLE_MPI_READY_TO_RECEIVE
       CALL MPI_RECV (ready_to_receive, 1, MPI_INTEGER,
     &      idest, itag, MPI_COMM_MODEL, istatus, ierr)
#endif
       CALL MPI_SEND (procBuff, lbuff, _MPI_TYPE_RX,
     &      idest, itag, MPI_COMM_MODEL, ierr)

      ENDIF

#endif /* ALLOW_USE_MPI */

      _END_MASTER( myThid )

      RETURN
      END