
Annotation of /MITgcm_contrib/MPMice/beaufort/code/eeboot_minimal.F



Revision 1.1
Sun May 31 03:41:36 2009 UTC (16 years, 2 months ago) by dimitri
Branch: MAIN
Saving code and input files, which had been used for test coupling of MITgcm with
MPMice and which were formerly available at http://ecco2.jpl.nasa.gov/data1/beaufort/

C $Header: /u/gcmpack/MITgcm/eesupp/src/eeboot_minimal.F,v 1.17 2006/07/29 22:57:54 jmc Exp $
C $Name: $

#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: EEBOOT_MINIMAL

C     !INTERFACE:
      SUBROUTINE EEBOOT_MINIMAL
      IMPLICIT NONE

C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EEBOOT\_MINIMAL
C     | o Set an initial environment that is predictable, i.e.
C     |   behaves in a similar way on all machines, and stable.
C     *==========================================================*
C     | Under MPI this routine calls MPI\_INIT to set up the
C     | MPI environment (on some systems the code is running as
C     | a single process prior to MPI\_INIT, on others the mpirun
C     | script has already created multiple processes). Until
C     | MPI\_Init is called it is unclear what state the
C     | application is in. Once this routine has been run it is
C     | "safe" to do things like I/O to report errors and to get
C     | run parameters.
C     | Note: This routine can also be compiled with CPP
C     | directives set so that no multi-processing is initialised.
C     | This is OK and will work fine.
C     *==========================================================*

C     !USES:
C     == Global data ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C     !LOCAL VARIABLES:
C     == Local variables ==
C     myThid    :: Temp. dummy thread number.
C     fNam      :: Used to build name of file for standard
C                  output and error output.
      INTEGER myThid
      CHARACTER*13 fNam
#ifdef ALLOW_USE_MPI
C     mpiRC     :: Error code reporting variable used
C                  with MPI.
C     msgBuffer :: Used to build messages for printing.
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER nptmp
#ifdef COMPONENT_MODULE
      INTEGER mpiMyWid
#endif
#ifdef ALLOW_CPL_MPMICE
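C     Variables for the MPMice ocean-ice coupling; myworldid and the
C     group leaders are shared through the CPL_MPI_ID common block.
C     myworldid     :: rank of this process in MPI_COMM_WORLD
C     numprocsworld :: total number of processes in MPI_COMM_WORLD
C     mycomponent   :: component id of this process (0=ocean, 1=ice)
C     components    :: component id of every world rank (gathered)
C     oceangroup, icegroup :: world ranks of each component
C     local_ocean_leader, local_ice_leader :: lowest world rank
C                      in the ocean and ice groups respectively
C     myid, numprocs :: rank and size in the split ocean communicator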
      COMMON /CPL_MPI_ID/
     &     myworldid, local_ocean_leader, local_ice_leader
      integer :: n, myid, numprocs, i, ierr, myworldid, numprocsworld
      integer :: mycomponent
      integer :: icesize, oceansize
      integer :: local_ocean_leader, local_ice_leader
      integer, dimension(:), allocatable :: components
      integer, dimension(:), allocatable :: icegroup, oceangroup
#endif /* ALLOW_CPL_MPMICE */
#endif /* ALLOW_USE_MPI */
CEOP

C--   Default values set to single processor case
      numberOfProcs = 1
      myProcId = 0
      pidIO    = myProcId
      myProcessStr = '------'
C     Set a dummy value for myThid because we are not multi-threading
C     yet.
      myThid = 1
#ifdef ALLOW_USE_MPI
C--
C--   MPI style multiple-process initialisation
C--   =========================================
#ifndef ALWAYS_USE_MPI
      IF ( usingMPI ) THEN
#endif
C--    Initialise MPI multi-process parallel environment.
C      On some systems the program forks at this point. Others have
C      already forked within mpirun - now that's an open standard!
       CALL MPI_INIT( mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I5)')
     &  'S/R EEBOOT_MINIMAL: MPI_INIT return code',
     &  mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

C--    MPI has now been initialised, but we still need to either
C      ask for a communicator or pretend that we have one:
C      Pretend that we have asked for a communicator
       MPI_COMM_MODEL = MPI_COMM_WORLD
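C      (when ALLOW_CPL_MPMICE is defined, MPI_COMM_MODEL is reset
C      below to the ocean-only communicator from MPI_COMM_SPLIT)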

#ifdef COMPONENT_MODULE
C--    Set the running directory
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
       CALL SETDIR( mpiMyWId )

C-     jmc: test:
C      add a first preliminary call to EESET_PARMS to set useCoupler
C      (needed to decide whether or not to call CPL_INIT)
       CALL EESET_PARMS
       IF ( eeBootError ) GOTO 999
C-     jmc: test end ; otherwise, uncomment next line:
c      useCoupler = .TRUE.

C--    Ask coupler interface for a communicator
       IF ( useCoupler) CALL CPL_INIT
#endif

#ifdef ALLOW_CPL_MPMICE
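C--    MPMice coupling setup: the ocean and ice codes share
C      MPI_COMM_WORLD.  Each process advertises its component id,
C      the world is partitioned into ocean and ice groups, and
C      MPI_COMM_MODEL becomes the ocean-only communicator.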
       CALL SETDIR_OCEAN( )
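C      query this process's rank and the total process count in
C      MPI_COMM_WORLD (ocean and ice processes together)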
       call MPI_comm_rank(MPI_COMM_WORLD, myworldid, ierr)
       call MPI_comm_size(MPI_COMM_WORLD, numprocsworld, ierr)

C      allocate array components based on the number of processors
       allocate(components(numprocsworld))

C      assign a component to the ocean code to organize processors
C      into a group
       mycomponent=0

C      gather components to all processors,
C      so each knows who is ice and who is ocean
       call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
     &      MPI_INTEGER,MPI_COMM_WORLD,ierr)

C      form ice and ocean groups
C      count the processors in each group
       icesize=0
       oceansize=0
       do i=1,numprocsworld
        if(components(i).eq.0) then
         oceansize=oceansize+1
        elseif(components(i).eq.1) then
         icesize=icesize+1
        else
         write(6,*) 'error: processor', i,
     &        'not associated with ice or ocean'
         stop
        endif
       enddo

C      allocate group arrays
       allocate(icegroup(icesize))
       allocate(oceangroup(oceansize))
C      form the groups
       icesize=0
       oceansize=0
       do i=1,numprocsworld
        if(components(i).eq.0) then
         oceansize=oceansize+1
         oceangroup(oceansize)=i-1 ! ranks are from 0 to numprocsworld-1
        elseif(components(i).eq.1) then
         icesize=icesize+1
         icegroup(icesize)=i-1 ! ranks are from 0 to numprocsworld-1
        else
         write(6,*) 'error: processor', i,
     &        'not associated with ice or ocean'
        endif
       enddo

C      pick the lowest rank in the group as the local group leader
       local_ocean_leader=oceangroup(1)
       local_ice_leader=icegroup(1)

C      form ocean communicator
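C      MPI_COMM_SPLIT is collective over MPI_COMM_WORLD: every rank
C      passes its component id as the "colour" and its world rank as
C      the "key", so the ocean ranks (colour 0) end up together, in
C      world-rank order, in the new communicator MPI_COMM_MODEL.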
       call MPI_comm_split(MPI_COMM_WORLD,mycomponent,myworldid,
     &      MPI_COMM_MODEL,ierr)
       call MPI_comm_rank(MPI_COMM_MODEL,myid,ierr)
       call MPI_comm_size(MPI_COMM_MODEL,numprocs,ierr)
#endif /* ALLOW_CPL_MPMICE */

C--    Get my process number
       CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I5)')
     &  'S/R EEBOOT_MINIMAL: MPI_COMM_RANK return code',
     &  mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
       myProcId = mpiMyId
       WRITE(myProcessStr,'(I4.4)') myProcId
       mpiPidIo = myProcId
       pidIO    = mpiPidIo
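C--    Open per-process output files STDERR.nnnn and STDOUT.nnnn,
C      where nnnn is the four-digit process number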
       IF ( mpiPidIo .EQ. myProcId ) THEN
        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
        WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
        OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
       ENDIF

C--    Synchronise all processes
C      Strictly this is superfluous, but by using it we can guarantee
C      to find out about processes that did not start up.
       CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I6)')
     &  'S/R EEBOOT_MINIMAL: MPI_BARRIER return code',
     &  mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

C--    Get number of MPI processes
       CALL MPI_COMM_SIZE ( MPI_COMM_MODEL, mpiNProcs, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I6)')
     &  'S/R EEBOOT_MINIMAL: MPI_COMM_SIZE return code',
     &  mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
       numberOfProcs = mpiNProcs

C--    Cannot have more processes than the compile-time MAX_NO_PROCS
       IF ( numberOfProcs .GT. MAX_NO_PROCS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,2I6)')
     &  'S/R EEBOOT_MINIMAL: Nb. of processes exceeds MAX_NO_PROCS',
     &  numberOfProcs, MAX_NO_PROCS
        CALL PRINT_ERROR( msgBuffer , myThid)
        WRITE(msgBuffer,'(2A)')
     &  ' Needs to increase MAX_NO_PROCS',
     &  ' in file "EEPARAMS.h" and to re-compile'
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
C--    Under MPI only allow the same number of processes as the
C--    process grid size.
C      Strictly we are allowed more procs., but knowing there
C      is an exact match makes things easier.
       IF ( numberOfProcs .NE. nPx*nPy ) THEN
        eeBootError = .TRUE.
        nptmp = nPx*nPy
        WRITE(msgBuffer,'(A,2I6)')
     &  'S/R EEBOOT_MINIMAL: No. of processes not equal to nPx*nPy',
     &  numberOfProcs, nptmp
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

#ifndef ALWAYS_USE_MPI
      ENDIF
#endif

#else /* ALLOW_USE_MPI */

      WRITE(myProcessStr,'(I4.4)') myProcId
      WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
      OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
c     WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
c     OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')

#endif /* ALLOW_USE_MPI */
#ifdef USE_LIBHPM
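C--   Initialise the hardware performance monitoring library (libhpm)
C     for this process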
      CALL F_HPMINIT(myProcId, "mitgcmuv")
#endif

  999 CONTINUE

      RETURN
      END

