C $Header: /u/gcmpack/MITgcm_contrib/MPMice/beaufort/code/eeboot_minimal.F,v 1.2 2011/12/21 23:06:07 dimitri Exp $
C $Name:  $

#include "PACKAGES_CONFIG.h"
#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: EEBOOT_MINIMAL

C     !INTERFACE:
      SUBROUTINE EEBOOT_MINIMAL

C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EEBOOT\_MINIMAL
C     | o Set an initial environment that is predictable, i.e.
C     | behaves in a similar way on all machines, and stable.
C     *==========================================================*
C     | Under MPI this routine calls MPI\_INIT to set up the
C     | MPI environment ( on some systems the code is running as
C     | a single process prior to MPI\_INIT, on others the mpirun
C     | script has already created multiple processes). Until
C     | MPI\_Init is called it is unclear what state the
C     | application is in. Once this routine has been run it is
C     | "safe" to do things like I/O to report errors and to get
C     | run parameters.
C     | Note: This routine can also be compiled with CPP
C     | directives set so that no multi-processing is initialised.
C     | This is OK and will work fine.
C     *==========================================================*

C     !USES:
      IMPLICIT NONE
C     == Global data ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C     !LOCAL VARIABLES:
C     == Local variables ==
C     myThid           :: Temp. dummy thread number.
C     fNam             :: Used to build name of file for standard
C                         output and error output.
      INTEGER myThid
      CHARACTER*13 fNam
#ifdef ALLOW_USE_MPI
C     mpiRC            :: Error code reporting variable used with MPI.
C     msgBuffer        :: Used to build messages for printing.
      CHARACTER*(MAX_LEN_MBUF) msgBuffer
      INTEGER mpiRC
      INTEGER nptmp
#ifdef COMPONENT_MODULE
      INTEGER mpiMyWid
#endif
#ifdef ALLOW_CPL_MPMICE
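C     The variables below support the MPMICE ocean-ice coupling.
C     myworldid, local_ocean_leader and local_ice_leader are kept in
C     the CPL_MPI_ID common block, presumably so that the coupling
C     routines can address the two component groups later on;
C     mycomponent is 0 for ocean processes and 1 for ice processes.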
      COMMON /CPL_MPI_ID/
     &     myworldid, local_ocean_leader, local_ice_leader
      integer :: n, myid, numprocs, i, ierr, myworldid, numprocsworld
      integer :: mycomponent
      integer :: icesize, oceansize
      integer :: local_ocean_leader, local_ice_leader
      integer, dimension(:), allocatable :: components
      integer, dimension(:), allocatable :: icegroup, oceangroup
#endif /* ALLOW_CPL_MPMICE */
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
      INTEGER mpiMyWid, color
#endif
#endif /* ALLOW_USE_MPI */
CEOP

C--   Default values set to single processor case
      numberOfProcs = 1
      myProcId      = 0
      pidIO         = myProcId
      myProcessStr  = '------'
C     Set a dummy value for myThid because we are not multi-threading
C     yet.
      myThid        = 1
#ifdef ALLOW_USE_MPI
C--
C--   MPI style multiple-process initialisation
C--   =========================================
#ifndef ALWAYS_USE_MPI
      IF ( usingMPI ) THEN
#endif
C--    Initialise MPI multi-process parallel environment.
C      On some systems the program forks at this point. Others have
C      already forked within mpirun - now that's an open standard!
       CALL MPI_INIT( mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I5)')
     &        'S/R EEBOOT_MINIMAL: MPI_INIT return code',
     &        mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

C--    MPI has now been initialised, but we still need to either
C      ask for a communicator or pretend that we have one:
C      here we pretend that we have asked for a communicator.
       MPI_COMM_MODEL = MPI_COMM_WORLD
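C      Note: the coupling and nesting options below (OASIS, the
C      component module, nesting, and MPMICE) may replace
C      MPI_COMM_MODEL with a smaller, component-specific communicator.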

#ifdef ALLOW_OASIS
C      add a first preliminary call to EESET_PARMS to set useOASIS
C      (needed to decide whether to call OASIS_INIT or not)
       CALL EESET_PARMS
       IF ( eeBootError ) GOTO 999
       IF ( useOASIS ) CALL OASIS_INIT(MPI_COMM_MODEL)
#endif /* ALLOW_OASIS */

#ifdef COMPONENT_MODULE
C--    Set the running directory
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
       CALL SETDIR( mpiMyWId )

C- jmc: test:
C      add a first preliminary call to EESET_PARMS to set useCoupler
C      (needed to decide whether to call CPL_INIT or not)
       CALL EESET_PARMS
       IF ( eeBootError ) GOTO 999
C- jmc: test end ; otherwise, uncomment next line:
c      useCoupler = .TRUE.

C--    Ask coupler interface for a communicator
       IF ( useCoupler) CALL CPL_INIT
#endif

C--    Case with Nest(ing)
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
C--    Set the running directory
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
       CALL SETDIR( mpiMyWId )

C--    Setup Nesting Execution Environment
       CALL NEST_EEINIT( mpiMyWId, color )
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

#if defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG)
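C--    MPMICE ocean-ice coupling: build an ocean-only communicator.
C      Every rank in MPI_COMM_WORLD advertises a component id through
C      MPI_ALLGATHER (0 = ocean, 1 = ice), the ranks of each component
C      are collected into oceangroup/icegroup, the lowest rank of each
C      group is taken as its local leader, and MPI_COMM_SPLIT then
C      turns MPI_COMM_MODEL into the ocean-only communicator.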
       CALL SETDIR_OCEAN( )
       call MPI_comm_rank(MPI_COMM_WORLD, myworldid, ierr)
       call MPI_comm_size(MPI_COMM_WORLD, numprocsworld, ierr)

C     allocate the components array based on the number of processors
       allocate(components(numprocsworld))

C     assign component id 0 to the ocean code, to organize its
C     processors into a group
       mycomponent=0

C     gather the component ids on all processors,
C     so each knows who is ice and who is ocean
       call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
     &      MPI_INTEGER,MPI_COMM_WORLD,ierr)

C     form ice and ocean groups
C     count the processors in each group
       icesize=0
       oceansize=0
       do i=1,numprocsworld
        if(components(i).eq.0) then
         oceansize=oceansize+1
        elseif(components(i).eq.1) then
         icesize=icesize+1
        else
         write(6,*) 'error: processor', i,
     &            'not associated with ice or ocean'
         stop
        endif
       enddo

C     allocate group arrays
       allocate(icegroup(icesize))
       allocate(oceangroup(oceansize))
C     form the groups (MPI ranks run from 0 to numprocsworld-1)
       icesize=0
       oceansize=0
       do i=1,numprocsworld
        if(components(i).eq.0) then
         oceansize=oceansize+1
         oceangroup(oceansize)=i-1
        elseif(components(i).eq.1) then
         icesize=icesize+1
         icegroup(icesize)=i-1
        else
         write(6,*) 'error: processor', i,
     &            'not associated with ice or ocean'
        endif
       enddo

C     pick the lowest rank in the group as the local group leader
       local_ocean_leader=oceangroup(1)
       local_ice_leader=icegroup(1)

C     form ocean communicator
       call MPI_comm_split(MPI_COMM_WORLD,mycomponent,myworldid,
     &      MPI_COMM_MODEL,ierr)
       call MPI_comm_rank(MPI_COMM_MODEL,myid,ierr)
       call MPI_comm_size(MPI_COMM_MODEL,numprocs,ierr)
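C     MPI_COMM_MODEL now contains only the ocean processes; myid and
C     numprocs are the rank and size within this ocean communicator.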
#endif /* defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG) */

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|

C--    Get my process number
       CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I5)')
     &        'S/R EEBOOT_MINIMAL: MPI_COMM_RANK return code',
     &        mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
       myProcId = mpiMyId
       WRITE(myProcessStr,'(I4.4)') myProcId
       mpiPidIo = myProcId
       pidIO    = mpiPidIo
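C--    Open per-process output files so that each MPI process writes
C      its own STDOUT.nnnn and STDERR.nnnn file.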
       IF ( mpiPidIo .EQ. myProcId ) THEN
        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
        WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
        OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
       ENDIF

#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
       WRITE(standardMessageUnit,'(2(A,I6))')
     &           ' mpiMyWId =', mpiMyWId, ' , color =',color
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

C--    Synchronise all processes
C      Strictly this is superfluous, but by using it we are guaranteed
C      to find out about processes that did not start up.
       CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I6)')
     &        'S/R EEBOOT_MINIMAL: MPI_BARRIER return code',
     &        mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

C--    Get number of MPI processes
       CALL MPI_COMM_SIZE ( MPI_COMM_MODEL, mpiNProcs, mpiRC )
       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,I6)')
     &        'S/R EEBOOT_MINIMAL: MPI_COMM_SIZE return code',
     &        mpiRC
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
       numberOfProcs = mpiNProcs

C--    Cannot have more processes than the compile-time MAX_NO_PROCS
       IF ( numberOfProcs .GT. MAX_NO_PROCS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuffer,'(A,2I6)')
     &    'S/R EEBOOT_MINIMAL: Nb. of processes exceeds MAX_NO_PROCS',
     &    numberOfProcs, MAX_NO_PROCS
        CALL PRINT_ERROR( msgBuffer , myThid)
        WRITE(msgBuffer,'(2A)')
     &    ' Needs to increase MAX_NO_PROCS',
     &    ' in file "EEPARAMS.h" and to re-compile'
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF
C--    Under MPI only allow same number of processes as proc.
C--    grid size.
C      Strictly we are allowed more procs. but knowing there
C      is an exact match makes things easier.
       IF ( numberOfProcs .NE. nPx*nPy ) THEN
        eeBootError = .TRUE.
        nptmp = nPx*nPy
        WRITE(msgBuffer,'(A,2I6)')
     &  'S/R EEBOOT_MINIMAL: No. of processes not equal to nPx*nPy',
     &  numberOfProcs, nptmp
        CALL PRINT_ERROR( msgBuffer , myThid)
        GOTO 999
       ENDIF

#ifndef ALWAYS_USE_MPI
      ENDIF
#endif

#else /* ALLOW_USE_MPI */

      WRITE(myProcessStr,'(I4.4)') myProcId
      WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
      OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
c     WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
c     OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')

#endif /* ALLOW_USE_MPI */
#ifdef USE_LIBHPM
      CALL F_HPMINIT(myProcId, "mitgcmuv")
#endif

 999  CONTINUE

      RETURN
      END