      integer Coupler_id /0/  ! this is Coupler's id, used to address
                              ! Coupler. This is a default value,
                              ! possibly to be redefined later

! Make Coupler's id 0 if it is active (i.e. communicating with
! the Component). Otherwise, make it a negative integer; in this case,
! the Component is standalone.
      parameter (ibuffer_size=10)
      integer Coupler_rank,my_id,COMM_local, &
     &component_master_rank_global,process_rank_global, &
     &component_master_rank_local,process_rank_local, &
     &component_nprocs,FlexLev,ibuffer(ibuffer_size),nprocs_global

      integer kind_REAL,kind_INTEGER,MPI_kind_REAL, &
     &kind_alt_REAL,MPI_kind_alt_REAL
      parameter (kind_REAL=8,kind_INTEGER=4)
      parameter (kind_alt_REAL=12-kind_REAL)
! kind_INTEGER must correspond to the number of bytes implied by the
! MPI_INTEGER MPI constant; all integers sent/received are of this
! kind. No value other than 4 is anticipated as of now.
! kind_REAL is the kind of real data to communicate. The corresponding
! MPI data type variable MPI_kind_REAL is assigned in CMP_INIT.
! kind_alt_REAL is the alternative kind of real data to communicate:
! since kind_alt_REAL=12-kind_REAL, it is 4 when kind_REAL is 8 and
! vice versa. The corresponding MPI data type variable
! MPI_kind_alt_REAL is assigned in CMP_INIT. (It is used in
! subroutines CMP_alt_SEND and CMP_alt_RECV.)
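! For example, with kind_REAL=8 a Component would declare (a sketch;
! the array names and size below are hypothetical):
!
!     real(kind=kind_REAL)     buf8(100)   ! for CMP_SEND/CMP_RECV
!     real(kind=kind_alt_REAL) buf4(100)   ! for CMP_alt_SEND/CMP_alt_RECV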
!***********************************************************************

      SUBROUTINE CMP_INIT(id,flex)
! This subroutine must be called by every Component right upon
! calling MPI_INIT. It assigns a value to the Component communicator
! COMM_local (which is a global variable in module CMP), to be
! thereafter used by the Component in place of
! MPI_COMM_WORLD wherever the latter is used by the Component's
! standalone version. Besides, it stores the Component's id,
! the process's ranks, and the "flexibility level" (flex) requested
! by the Component in global variables. (The latter parameter affects
! the mode of communications; for its description, see CMP_SEND and
! CMP_RECV.) Finally, it starts handshaking with Coupler, receiving
! the unique (global, i.e. in MPI_COMM_WORLD) Coupler process
! rank Coupler_rank from Coupler.
! ibuffer may include additional information beyond Coupler_rank;
! see the MPI_RECV below.
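! A minimal calling sketch (hypothetical values: Component id 5,
! FlexLev=1; assumes the caller USEs this module for Coupler_id):
!
!     call CMP_INIT(5,1)
!     if (Coupler_id.ge.0) call CMP_INTRO(0)   ! master local rank 0
!     ! ... all further communication uses COMM_local ...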
      integer ierr,color,key,status(MPI_STATUS_SIZE),tag,dummy
      logical izd
      character*20 s
! Determine whether MPI is initialized; if it is not, initialize it:
      call MPI_INITIALIZED(izd,ierr)
      if (.not.izd) call MPI_INIT(ierr)
! Determine MPI send/receive types according to the prescribed
! kinds of the arrays to be communicated:
      if (kind_REAL.eq.8) then
        MPI_kind_REAL=MPI_REAL8
        MPI_kind_alt_REAL=MPI_REAL4
      else if (kind_REAL.eq.4) then
        MPI_kind_REAL=MPI_REAL4
        MPI_kind_alt_REAL=MPI_REAL8
      else
        write(s,'(i0)') kind_REAL
        call GLOB_ABORT(1, &
     &  'CMP_INIT: illegal value of kind_REAL='//trim(s),1)
      end if
      if (kind_INTEGER.ne.4) then
        write(s,'(i0)') kind_INTEGER
        call GLOB_ABORT(1, &
     &  'CMP_INIT: illegal value of kind_INTEGER='//trim(s),1)
      end if
! Store the Component's id:
      my_id=id

! Store the Component's "flexibility level":
      FlexLev=flex

! Assign a value to the Component communicator
! COMM_local, to be thereafter used by the Component in place of
! MPI_COMM_WORLD wherever the latter is used by the Component's
! standalone version:
      color=id   ! all processes of one Component share a color
      key=1      ! (assumed value: any uniform key preserves rank order)
!     print*,'CMP_INIT: to call MPI_COMM_SPLIT, color=',color
      call MPI_COMM_SPLIT(MPI_COMM_WORLD,color,key,COMM_local,ierr)
      call GLOB_ABORT(ierr,'CMP_INIT: error in MPI_COMM_SPLIT',1)
! Store the process's global and local ranks:

!     print*,'CMP_INIT: to call MPI_COMM_RANK for global rank'
      call MPI_COMM_RANK(MPI_COMM_WORLD,process_rank_global,ierr)
      call GLOB_ABORT(ierr, &
     &'CMP_INIT: error in MPI_COMM_RANK(MPI_COMM_WORLD...)',1)
!     print*,'CMP_INIT: to call MPI_COMM_RANK for local rank'
      call MPI_COMM_RANK(COMM_local,process_rank_local,ierr)
      call GLOB_ABORT(ierr, &
     &'CMP_INIT: error in MPI_COMM_RANK(COMM_local...)',1)
! Store component_nprocs, the Component's number of processes;
! calculate the global number of processes;
! determine whether this is standalone mode and, if so, make
! Coupler's id negative and return:

      call MPI_COMM_SIZE(COMM_local,component_nprocs,ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs_global,ierr)
      if (component_nprocs.eq.nprocs_global) then
        if (process_rank_local.eq.0) then
          call wrf_debug(2,'CMP_INIT: standalone mode')
        end if
        Coupler_id=-1
        RETURN
      end if
! Start handshaking with Coupler (all processes):
! receive the unique (global, i.e. in MPI_COMM_WORLD) Coupler
! process rank Coupler_rank from Coupler

      tag=Coupler_id   ! (assumed handshake tag; must match the tag
                       ! used by Coupler's send)
!     print*,'CMP_INIT: to call MPI_RECV'
      call MPI_RECV(ibuffer,ibuffer_size,MPI_INTEGER,MPI_ANY_SOURCE,tag,&
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_INIT: error in MPI_RECV',1)
      Coupler_rank=ibuffer(2)
      if (ibuffer(1).ne.Coupler_id) then
        write(s,'(i0)') ibuffer(1)
        call wrf_debug(2,'CMP_INIT: stopped, rcvd ibuffer(1) value ' &
     &  //trim(s)//' is not Coupler id')
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if
      if (ibuffer(3).ne.ibuffer_size) then
        write(s,'(i0)') ibuffer(3)
        call wrf_debug(2,'CMP_INIT: stopped, rcvd ibuffer(3) value ' &
     &  //trim(s)//' is not ibuffer_size')
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if
! Inform Coupler that this Component exists and is active:

      call MPI_GATHER(id,1,MPI_INTEGER,dummy,1,MPI_INTEGER, &
     &Coupler_rank,MPI_COMM_WORLD,ierr)

!     print*, &
!    >'CMP_INIT: ranks: process local, global, Coupler; Coupler_id: ', &
!    >process_rank_local,process_rank_global,Coupler_rank,Coupler_id

      return
      END

!***********************************************************************
      SUBROUTINE CMP_INTRO(master_rank_local)

! This routine must be called by all of the Component's processes,
! which must all know the local rank of the Component's master
! process (master_rank_local).
! Alternatively, SUBROUTINE CMP_INTRO_m can be called
! from the Component's master process only, and SUBROUTINE CMP_INTRO_s
! from all other processes. In that case, the local rank of the
! Component's master process is determined and communicated to all
! other processes by those routines.
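! For example, the call (in every process)
!
!     call CMP_INTRO(0)
!
! is equivalent to the pattern
!
!     if (process_rank_local.eq.0) then
!       call CMP_INTRO_m
!     else
!       call CMP_INTRO_s
!     end if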
      character*255 message
      integer master_rank_local,ierr,ibuf(3),color,key,tag

!     print*,'CMP_INTRO: entered ',master_rank_local,process_rank_local

      component_master_rank_local=master_rank_local

      if (Coupler_id.lt.0) return    ! <- standalone mode
! If this process is the Component's master process,
! complete handshaking with Coupler:
! "register", i.e. send the Component master process global rank
! to Coupler. Also, send the requested "flexibility level".
! (Sending the Component's id (in ibuf(1)) is for double-checking only.)

      if (process_rank_local.eq.master_rank_local) then
        component_master_rank_global=process_rank_global
        ibuf(1)=my_id   ! redundant, sent for control only
        ibuf(2)=process_rank_global
        ibuf(3)=FlexLev
        tag=my_id       ! (assumed tag convention: Component id)
        write(message,*) 'CMP_INTRO: to call MPI_SEND ', &
     &  process_rank_local, process_rank_global
        call wrf_debug(2,message)
        call MPI_SEND(ibuf,3,MPI_INTEGER,Coupler_rank,tag, &
     &  MPI_COMM_WORLD,ierr)
        if (ierr.ne.0) then
          write(message,*) 'CMP_INTRO: error in MPI_SEND, process ', &
     &    process_rank_global
          call wrf_error_fatal(message)
        end if
      end if

!     print*,'CMP_INTRO: returning ',process_rank_local, &
!    >  process_rank_global,Coupler_rank

      return
      END

!***********************************************************************
      SUBROUTINE CMP_INTRO_m

! This routine must be called by the Component's master process (only),
! if CMP_INTRO is not called (see comments in CMP_INTRO)

      character*255 :: message
      integer ierr,ibuf(3),color,key,tag,i

!     print*,'CMP_INTRO_m: entered, process_rank_local=', &
!    >process_rank_local

      component_master_rank_local=process_rank_local
      component_master_rank_global=process_rank_global

! Make the master's local and global ranks known to all other
! processes of the Component:
      tag=0   ! (assumed value; must match the tag used in CMP_INTRO_s)
      do i=0,component_nprocs-1
        if (i.ne.component_master_rank_local) then
          ibuf(1)=component_master_rank_local
          ibuf(2)=component_master_rank_global
          call MPI_SEND(ibuf,2,MPI_INTEGER,i,tag,COMM_local,ierr)
          if (ierr.ne.0) then
            write(message,*) 'CMP_INTRO_m: error in 1st MPI_SEND, i=',i
            call wrf_error_fatal(message)
          end if
        end if
      end do

      if (Coupler_id.lt.0) return    ! <- standalone mode
! Complete handshaking with Coupler:
! "register", i.e. send the Component master process global rank
! to Coupler. Also, send the requested "flexibility level".
! (Sending the Component's id (in ibuf(1)) is for double-checking only.)

      ibuf(1)=my_id   ! redundant, sent for control only
      ibuf(2)=process_rank_global
      ibuf(3)=FlexLev
      tag=my_id       ! (assumed tag convention: Component id)
!     print*,'CMP_INTRO_m: to call MPI_SEND ',process_rank_local, &
!    >  process_rank_global
      call MPI_SEND(ibuf,3,MPI_INTEGER,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      if (ierr.ne.0) then
        write(message,*) 'CMP_INTRO_m: error in MPI_SEND, process ', &
     &  process_rank_global
        CALL wrf_error_fatal(message)
      end if

!     print*,'CMP_INTRO_m: returning ',process_rank_local, &
!    >  process_rank_global

      return
      END

!***********************************************************************
      SUBROUTINE CMP_INTRO_s

! This routine must be called by all the Component's processes other
! than the master process,
! if CMP_INTRO is not called (see comments in CMP_INTRO)

      character*255 :: message
      integer ierr,ibuf(3),color,key,tag,i,status(MPI_STATUS_SIZE)

!     print*,'CMP_INTRO_s: entered, process_rank_local=', &
!    >process_rank_local

! Receive the master's local and global ranks from the master process:
      tag=0   ! (assumed value; must match the tag used in CMP_INTRO_m)
      call MPI_RECV(ibuf,2,MPI_INTEGER,MPI_ANY_SOURCE,tag, &
     &COMM_local,status,ierr)
      if (ierr.ne.0) then
        write(message,*) 'CMP_INTRO_s: error in MPI_RECV ', &
     &  process_rank_global
        call wrf_error_fatal(message)
      end if

      component_master_rank_local=ibuf(1)
      component_master_rank_global=ibuf(2)
!     print*,'CMP_INTRO_s: returning ',process_rank_local, &
!    >  process_rank_global,component_master_rank_local, &
!    >  component_master_rank_global

      return
      END

!***********************************************************************
      SUBROUTINE CMP_SEND(F,N)

      character*255 :: message
      integer N,ierr,tag
      real(kind=kind_REAL) F(N)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_SEND: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_SEND: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1) then

! With "flexibility level" FlexLev=1, any Component process is
! allowed to call this subroutine, but only the Component
! master process can actually send data (so the
! others just make a dummy call), as the Coupler process only
! receives data from the Component master process.

          RETURN

        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then

          write(message, &
     &    '("*** CMP_SEND: illegal value of FlexLev",i9/' &
     &    //'"*** STOPPED")') FlexLev
          call wrf_error_fatal(message)

        end if

! With "flexibility level" FlexLev=2 or FlexLev=3, any
! Component process is allowed to actually send data.
! [In this case, the Coupler process (in CPL_RECV) receives
! from MPI_ANY_SOURCE rather than component_master_rank_global,
! and it is only identification by tag that enables Coupler
! to receive the data from the right source.]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

      end if
      tag=my_id   ! (assumed tag convention: Component id identifies
                  ! the source)
      call MPI_SEND(F,N,MPI_kind_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_SEND: error in MPI_SEND',1)

!     call CMP_DBG_CR(6,'CMP_SEND: exiting')

      return
      END
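! Caller-side sketch (hypothetical field sst and sizes im,jm): with
! FlexLev=1, every process may execute the same line and only the
! master actually sends:
!
!     real(kind=kind_REAL) sst(im*jm)
!     call CMP_SEND(sst,im*jm)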
!***********************************************************************
      SUBROUTINE CMP_alt_SEND(F,N)

      character*255 :: message
      integer N,ierr,tag
      real(kind=kind_alt_REAL) F(N)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_alt_SEND: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_alt_SEND: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1) then

! With "flexibility level" FlexLev=1, any Component process is
! allowed to call this subroutine, but only the Component
! master process can actually send data (so the
! others just make a dummy call), as the Coupler process only
! receives data from the Component master process.

          RETURN

        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then

          write(message, &
     &    '("*** CMP_alt_SEND: illegal value of FlexLev",i9/' &
     &    //'"*** STOPPED")') FlexLev
          call wrf_error_fatal(message)

        end if

! With "flexibility level" FlexLev=2 or FlexLev=3, any
! Component process is allowed to actually send data.
! [In this case, the Coupler process (in CPL_RECV) receives
! from MPI_ANY_SOURCE rather than component_master_rank_global,
! and it is only identification by tag that enables Coupler
! to receive the data from the right source.]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

      end if
      tag=my_id   ! (assumed tag convention: Component id identifies
                  ! the source)
      call MPI_SEND(F,N,MPI_kind_alt_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_alt_SEND: error in MPI_SEND',1)

!     call CMP_DBG_CR(6,'CMP_alt_SEND: exiting')

      return
      END

!***********************************************************************
      SUBROUTINE CMP_gnr_SEND(F,N,MPI_DATATYPE)

      integer N,MPI_DATATYPE
      integer ierr,tag
      character*255 :: message

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_gnr_SEND: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_gnr_SEND: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1) then

! With "flexibility level" FlexLev=1, any Component process is
! allowed to call this subroutine, but only the Component
! master process can actually send data (so the
! others just make a dummy call), as the Coupler process only
! receives data from the Component master process.

          RETURN

        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then

          write(message, &
     &    '("*** CMP_gnr_SEND: illegal value of FlexLev",i9/' &
     &    //'"*** STOPPED")') FlexLev
          call wrf_debug(2,message)
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

! With "flexibility level" FlexLev=2 or FlexLev=3, any
! Component process is allowed to actually send data.
! [In this case, the Coupler process (in CPL_RECV) receives
! from MPI_ANY_SOURCE rather than component_master_rank_global,
! and it is only identification by tag that enables Coupler
! to receive the data from the right source.]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

      end if
      tag=my_id   ! (assumed tag convention: Component id identifies
                  ! the source)
      call MPI_SEND(F,N,MPI_DATATYPE,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_gnr_SEND: error in MPI_SEND',1)

!     call CMP_DBG_CR(6,'CMP_gnr_SEND: exiting')

      return
      END
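! For example, an integer field can be sent by passing the MPI type
! explicitly (hypothetical array mask and sizes im,jm):
!
!     integer mask(im*jm)
!     call CMP_gnr_SEND(mask,im*jm,MPI_INTEGER)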
!***********************************************************************
      SUBROUTINE CMP_INTEGER_SEND(F,N)

      character*255 :: message
      integer N,ierr,tag
      integer F(N)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     print*,'CMP_INTEGER_SEND: entered with N=',N,' F=',F, &
!    >  '; my_id=',my_id,' Coupler_rank=',Coupler_rank
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_INTEGER_SEND: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1) then

! With "flexibility level" FlexLev=1, any Component process is
! allowed to call this subroutine, but only the Component
! master process can actually send data (so the
! others just make a dummy call), as the Coupler process only
! receives data from the Component master process.

          RETURN

        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then

          write(message, &
     &    '("*** CMP_INTEGER_SEND: illegal value of FlexLev",i9/' &
     &    //'"*** STOPPED")') FlexLev
          call wrf_debug(2,message)
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

! With "flexibility level" FlexLev=2 or FlexLev=3, any
! Component process is allowed to actually send data.
! [In this case, the Coupler process (in CPL_RECV) receives
! from MPI_ANY_SOURCE rather than component_master_rank_global,
! and it is only identification by tag that enables Coupler
! to receive the data from the right source.]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

      end if
      tag=my_id   ! (assumed tag convention: Component id identifies
                  ! the source)
      write(message,*) 'CMP_INTEGER_SEND: to call MPI_SEND; F=', &
     &F,' N=',N,' Coupler_rank=',Coupler_rank,' tag=',tag
      call wrf_debug(2,message)
      call MPI_SEND(F,N,MPI_INTEGER,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_INTEGER_SEND: error in MPI_SEND',1)
      call wrf_debug(1,'CMP_INTEGER_SEND: to return')

      return
      END

!***********************************************************************
      SUBROUTINE CMP_RECV(F,N)

      character*255 :: message
      integer N,ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)
      real(kind=kind_REAL) F(N)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_RECV: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_RECV: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

! With "flexibility level" FlexLev=1 or FlexLev=2, any
! Component process is allowed to call this subroutine but
! only the Component master process is supposed to actually
! receive data (so the others just make a dummy call), as
! the Coupler process only sends data to the Component master
! process.

          RETURN

        else if (FlexLev.eq.3) then

! With "flexibility level" FlexLev=3, any Component process
! may actually receive data.
! [In this case, the Coupler process (in CPL_SEND) first
! receives the Component process global rank
! (process_rank_global) from this subroutine, the source being
! MPI_ANY_SOURCE, so it is only identification by tag that
! enables Coupler to receive process_rank_global from the right
! source. Upon receipt, the Coupler process (in CPL_SEND)
! sends the data to this Component process, rather than to
! the Component master process as is the case with lower
! "flexibility levels".]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id   ! (assumed tag convention: Component id)
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_RECV: error in MPI_SEND',1)

        else

          print '("*** CMP_RECV: illegal value of FlexLev",i9/ &
     & "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

      end if
      tag=my_id   ! (assumed tag convention: Component id)
      call MPI_RECV(F,N,MPI_kind_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_RECV: error in MPI_RECV',1)

!     call CMP_DBG_CR(6,'CMP_RECV: exiting')

      return
      END
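! Caller-side sketch (hypothetical field sst and sizes im,jm),
! mirroring the corresponding send on the Coupler side:
!
!     real(kind=kind_REAL) sst(im*jm)
!     call CMP_RECV(sst,im*jm)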
!***********************************************************************
      SUBROUTINE CMP_alt_RECV(F,N)

      character*255 :: message
      integer N,ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)
      real(kind=kind_alt_REAL) F(N)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_alt_RECV: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_alt_RECV: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_debug(2,message)
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

! With "flexibility level" FlexLev=1 or FlexLev=2, any
! Component process is allowed to call this subroutine but
! only the Component master process is supposed to actually
! receive data (so the others just make a dummy call), as
! the Coupler process only sends data to the Component master
! process.

          RETURN

        else if (FlexLev.eq.3) then

! With "flexibility level" FlexLev=3, any Component process
! may actually receive data.
! [In this case, the Coupler process (in CPL_SEND) first
! receives the Component process global rank
! (process_rank_global) from this subroutine, the source being
! MPI_ANY_SOURCE, so it is only identification by tag that
! enables Coupler to receive process_rank_global from the right
! source. Upon receipt, the Coupler process (in CPL_SEND)
! sends the data to this Component process, rather than to
! the Component master process as is the case with lower
! "flexibility levels".]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id   ! (assumed tag convention: Component id)
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_alt_RECV: error in MPI_SEND',1)

        else

          write(message,'("*** CMP_alt_RECV: illegal value of FlexLev",'&
     &    //'i9/"*** STOPPED")') FlexLev
          call wrf_error_fatal(message)

        end if

      end if
      tag=my_id   ! (assumed tag convention: Component id)
      call MPI_RECV(F,N,MPI_kind_alt_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_alt_RECV: error in MPI_RECV',1)

!     call CMP_DBG_CR(6,'CMP_alt_RECV: exiting')

      return
      END

!***********************************************************************
      SUBROUTINE CMP_gnr_RECV(F,N,MPI_DATATYPE)

      integer N,MPI_DATATYPE
      character*255 :: message
      integer ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)

      if (Coupler_id.lt.0) return    ! <- standalone mode

!     call CMP_DBG_CR(6,'CMP_gnr_RECV: entered')
      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

          write(message, &
     &    '("*** CMP_gnr_RECV: process_rank_local=",i4," ***"/' &
     &    //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &    //'"*** STOPPED ***")') &
     &    process_rank_local,component_master_rank_local
          call wrf_error_fatal(message)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

! With "flexibility level" FlexLev=1 or FlexLev=2, any
! Component process is allowed to call this subroutine but
! only the Component master process is supposed to actually
! receive data (so the others just make a dummy call), as
! the Coupler process only sends data to the Component master
! process.

          RETURN

        else if (FlexLev.eq.3) then

! With "flexibility level" FlexLev=3, any Component process
! may actually receive data.
! [In this case, the Coupler process (in CPL_SEND) first
! receives the Component process global rank
! (process_rank_global) from this subroutine, the source being
! MPI_ANY_SOURCE, so it is only identification by tag that
! enables Coupler to receive process_rank_global from the right
! source. Upon receipt, the Coupler process (in CPL_SEND)
! sends the data to this Component process, rather than to
! the Component master process as is the case with lower
! "flexibility levels".]
! But in any case only one Component process may actually be
! engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id   ! (assumed tag convention: Component id)
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_gnr_RECV: error in MPI_SEND',1)

        else

          write(message,'("*** CMP_gnr_RECV: illegal value of FlexLev",'&
     &    //'i9/ "*** STOPPED")') FlexLev
          call wrf_debug(2,message)
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

      end if
      tag=my_id   ! (assumed tag convention: Component id)
      call MPI_RECV(F,N,MPI_DATATYPE,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_gnr_RECV: error in MPI_RECV',1)

!     call CMP_DBG_CR(6,'CMP_gnr_RECV: exiting')

      return
      END
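! For example, an integer field can be received by passing the MPI
! type explicitly (hypothetical array mask and sizes im,jm):
!
!     integer mask(im*jm)
!     call CMP_gnr_RECV(mask,im*jm,MPI_INTEGER)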
!***********************************************************************
      SUBROUTINE CMP_ANNOUNCE(nunit,s)

! Writes the string s to unit nunit from the Component's master
! process.

      character*(*) s
      integer nunit
      character*255 message

      if (process_rank_local.eq.component_master_rank_local) then

        if (nunit==0 .or. nunit==6) then
          call wrf_message(trim(s))   ! (assumed: route stdout/stderr
                                      ! output through wrf_message)
        else
          write(nunit,*) trim(s)
        end if

      else if (FlexLev.eq.0) then

! With "flexibility level" FlexLev=0, only the Component master
! process is supposed to call this subroutine.

        write(message, &
     &  '("*** CMP_ANNOUNCE: process_rank_local=",i4," ***"/' &
     &  //'"*** and component_master_rank_local=",i4," differ: ***"/'&
     &  //'"*** STOPPED ***")') &
     &  process_rank_local,component_master_rank_local
        call wrf_error_fatal(message)

      end if

      return
      END

!***********************************************************************
      SUBROUTINE CMP_STDOUT(s)

! Redirects standard output (unit 6) to the file named by s.

!     USE CMP_COMM, ONLY: Coupler_id,process_rank_global
! <- These values may not have the right value by this moment,
! as this routine may be called before CMP_INIT - 02/23/05

      character*(*) s
      character*4 mess
      integer ios

      open(9, &   ! (the unit number here is an assumption)
     &file='/nfsuser/g01/wx20ds/C/cmp.stdout', &
     &form='formatted',status='old',iostat=ios)
      if (ios.eq.0) then
        read(9,*) mess
        close(9)
        if (mess.eq.'mess') then
!         print*,'CMP_STDOUT: unit 6 left alone, process ', &
!    >      process_rank_global
! <- process_rank_global may be undefined by this moment, as
! this routine may be called before CMP_INIT - 02/23/05
          RETURN
        end if
      end if

!     if (Coupler_id.lt.0) RETURN  ! Nothing is to occur if there is
!                                  ! no communication with Coupler,
!                                  ! i.e. if the Component is standalone
! <- Coupler_id may not have the right value by this moment,
! as this routine may be called before CMP_INIT - 02/23/05

      if (len_trim(s).eq.0) RETURN

      close(6)
      open(6,file=trim(s),form='formatted',status='unknown')
      call wrf_debug(2,'CMP_STDOUT: unit 6 closed, reopened as ' &
     &//trim(s))

      return
      END

!***********************************************************************
      SUBROUTINE CMP_DBG_CR(nunit,s)

! Debugging routine: mainly, prints Coupler_rank

      character*(*) s
      integer nunit
      integer ncall/0/,ncallmax/5000/

! If s(5:6) is 'm:' (i.e. the message starts with 'CMP_m:'), print
! from the Component master process only:
      if (s(5:6).eq.'m:') then
        if (process_rank_local .ne. component_master_rank_local) RETURN
      end if

      if (ncall.ge.ncallmax) RETURN
      ncall=ncall+1

      write(nunit,*) process_rank_global,ncall,Coupler_id,Coupler_rank,s

! The following assumes that Coupler_rank must be =0; comment it out
! if this is not the case
      call GLOB_ABORT(Coupler_rank, &
     &'CMP_DBG_CR: Coupler_rank.ne.0, aborting',1)

      return
      END

!***********************************************************************
      SUBROUTINE CMP_FLUSH(nunit)

! Flushes unit nunit, one process at a time, with barriers in between.

      integer nunit
      integer i,ierr

      do i=0,component_nprocs-1
        call MPI_BARRIER(COMM_local,ierr)
        call GLOB_ABORT(ierr,'CMP_FLUSH: MPI_BARRIER failed, aborting', &
     &  1)
        if (i.eq.process_rank_local) FLUSH(nunit)
      end do

      return
      END

!***********************************************************************
      subroutine CMP_FINALIZE(izd,ierr)

! Finalizes MPI if it has been initialized. On return, izd tells
! whether MPI was initialized, and ierr accumulates the MPI error
! codes.

      logical izd
      integer ierr,ierr1,ierr2

      ierr1=0
      ierr2=0
      call MPI_INITIALIZED(izd,ierr1)
      if (izd) call MPI_FINALIZE(ierr2)
      ierr=abs(ierr1)+abs(ierr2)

      return
      end
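! A minimal calling sketch at Component shutdown (izd and ierr are
! the caller's locals):
!
!     logical izd
!     integer ierr
!     call CMP_FINALIZE(izd,ierr)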