1 #define NEST_FULL_INFLUENCE(A,B) A=B
6 USE module_driver_constants
9 USE module_cpl, ONLY : coupler_on, cpl_init
16 INTEGER, PARAMETER :: MPI_UNDEFINED = -1
19 INTEGER, PARAMETER :: max_halo_width = 6 ! 5
21 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
22 INTEGER :: lats_to_mic, minx, miny
24 INTEGER :: communicator_stack_cursor = 0
25 INTEGER :: current_id = 1
26 INTEGER, DIMENSION(max_domains) :: ntasks_stack, ntasks_y_stack &
27 , ntasks_x_stack, mytask_stack &
28 , mytask_x_stack, mytask_y_stack &
30 INTEGER, DIMENSION(max_domains) :: ntasks_store, ntasks_y_store &
31 , ntasks_x_store, mytask_store &
32 , mytask_x_store, mytask_y_store
33 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
35 INTEGER, DIMENSION(max_domains) :: local_communicator_stack, local_communicator_periodic_stack &
36 ,local_iocommunicator_stack &
37 ,local_communicator_x_stack, local_communicator_y_stack
38 INTEGER, DIMENSION(max_domains) :: local_communicator_store, local_communicator_periodic_store &
39 ,local_iocommunicator_store &
40 ,local_communicator_x_store, local_communicator_y_store
42 INTEGER :: mpi_comm_allcompute = MPI_UNDEFINED
43 INTEGER :: local_communicator = MPI_UNDEFINED
44 INTEGER :: local_communicator_periodic = MPI_UNDEFINED
45 INTEGER :: local_iocommunicator = MPI_UNDEFINED
46 INTEGER :: local_communicator_x = MPI_UNDEFINED
47 INTEGER :: local_communicator_y = MPI_UNDEFINED ! subcommunicators for rows and cols of mesh
48 INTEGER :: local_quilt_comm = MPI_UNDEFINED ! added 20151212 jm
49 LOGICAL :: dm_debug_flag = .FALSE.
50 ! for parallel nesting, 201408, jm
51 INTEGER intercomm_to_mom( max_domains ), intercomm_to_kid( max_nests, max_domains )
52 INTEGER mpi_comm_to_mom( max_domains ), mpi_comm_to_kid( max_nests, max_domains )
53 INTEGER which_kid(max_domains), nkids(max_domains)
54 INTEGER nest_task_offsets(max_domains)
55 LOGICAL intercomm_active( max_domains )
56 LOGICAL domain_active_this_task( max_domains )
57 ! see comments below (search for "Communicator definition")
58 INTEGER tasks_per_split
59 INTEGER comm_start(max_domains) ! set in dm_task_split
60 ! INTEGER comm_pes (max_domains) ! either this may be set in dm_task_split
61 ! INTEGER comm_pes_x(max_domains) ! or these may be set in dm_task_split
62 ! INTEGER comm_pes_y(max_domains) ! " " may be set in dm_task_split
63 ! INTEGER comm_domain(max_domains) ! set in dm_task_split
64 INTEGER nest_pes_x(max_domains) ! set in dm_task_split
65 INTEGER nest_pes_y(max_domains) ! set in dm_task_split
66 INTEGER comms_i_am_in (max_domains) ! list of local communicators this task is a member of
67 INTEGER loc_comm(max_domains)
69 INTEGER nio_tasks_per_group(max_domains), nio_groups, num_io_tasks
70 NAMELIST /dm_task_split/ tasks_per_split, comm_start, nest_pes_x, nest_pes_y
71 NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups, poll_servers
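! Illustrative only (not part of the build): a hypothetical namelist.input entry
! for the quilting namelist declared above, requesting one group of two dedicated
! I/O server tasks with polling disabled:
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!    poll_servers        = .false.,
!   /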
75 integer :: c_ipsy, c_ipey, c_kpsy, c_kpey, c_kpsx, c_kpex, c_ipex, c_ipsx, c_jpex, c_jpsx, c_jpey, c_jpsy
76 integer :: c_imsy, c_imey, c_kmsy, c_kmey, c_kmsx, c_kmex, c_imex, c_imsx, c_jmex, c_jmsx, c_jmey, c_jmsy
80 INTERFACE wrf_dm_maxval
81 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
82 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
84 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
88 INTERFACE wrf_dm_minval ! gopal's doing
89 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
90 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
92 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
98 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
100 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
105 IF ( MOD( P, M ) .EQ. 0 ) THEN
107 IF ( ABS(M-N) .LT. MINI &
108 .AND. M .GE. PROCMIN_M &
109 .AND. N .GE. PROCMIN_N &
117 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
118 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
119 CALL wrf_message ( TRIM ( wrf_err_message ) )
120 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
121 CALL wrf_message ( TRIM ( wrf_err_message ) )
122 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
123 CALL wrf_message ( TRIM ( wrf_err_message ) )
124 WRITE( wrf_err_message , * )' P ', P
125 CALL wrf_message ( TRIM ( wrf_err_message ) )
126 WRITE( wrf_err_message , * )' MINM ', MINM
127 CALL wrf_message ( TRIM ( wrf_err_message ) )
128 WRITE( wrf_err_message , * )' MINN ', MINN
129 CALL wrf_message ( TRIM ( wrf_err_message ) )
130 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
133 END SUBROUTINE MPASPECT
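! Illustrative example of MPASPECT's behavior (hypothetical task count, not
! exercised here): for P = 24 with PROCMIN_M = PROCMIN_N = 1, the factor pairs of
! 24 are scanned and the first pair with the smallest |M-N| is kept, so MPASPECT
! would return MINM = 4, MINN = 6, i.e. a 4 x 6 processor mesh (consistent with
! the comment in compute_mesh that the i-dimension count is the smaller one).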
135 SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
137 INTEGER, INTENT(IN) :: ntasks
138 INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
140 CALL nl_get_nproc_x ( 1, ntasks_x )
141 CALL nl_get_nproc_y ( 1, ntasks_y )
142 CALL nl_get_lats_to_mic ( 1, lats_to_mic )
143 ! check whether the user has specified the decomposition in the namelist
144 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
145 ! if only ntasks_x is specified then make it 1-d decomp in i
146 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
147 ntasks_y = ntasks / ntasks_x
148 ! if only ntasks_y is specified then make it 1-d decomp in j
149 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
150 ntasks_x = ntasks / ntasks_y
152 ! make sure user knows what they're doing
153 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
154 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
155 CALL wrf_error_fatal ( wrf_err_message )
157 ELSE IF ( lats_to_mic .GT. 0 ) THEN
158 ntasks_x = ntasks / 2
160 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
161 WRITE( wrf_err_message , * )&
162 'WRF_DM_INITIALIZE (lats_to_mic > 0) nproc_x (',ntasks_x,')* nproc_y (',ntasks_y,&
163 ') in namelist ne ',ntasks
164 CALL wrf_error_fatal ( wrf_err_message )
167 ! When neither is specified, work out mesh with MPASPECT
168 ! Pass nproc_ln and nproc_nt so that number of procs in
169 ! i-dim (nproc_ln) is less than or equal to the number in the j-dim (nproc_nt).
170 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
172 ntasks_store(1) = ntasks
173 ntasks_x_store(1) = ntasks_x
174 ntasks_y_store(1) = ntasks_y
175 END SUBROUTINE compute_mesh
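! Illustrative compute_mesh outcomes (hypothetical values): with 24 tasks and
! nproc_x = 4, nproc_y = -1 in the namelist, only the i-decomposition is given,
! so ntasks_y = 24/4 = 6; with nproc_x = nproc_y = -1 and lats_to_mic = 0 the
! mesh falls through to MPASPECT as described above.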
177 SUBROUTINE wrf_dm_initialize
180 INTEGER :: local_comm_per, local_comm_x, local_comm_y, local_comm2, new_local_comm, group, newgroup, p, p1, ierr,itmp
181 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
183 INTEGER, DIMENSION(2) :: dims, coords
184 LOGICAL, DIMENSION(2) :: isperiodic
185 LOGICAL :: reorder_mesh
187 CALL instate_communicators_for_domain(1)
189 CALL wrf_get_dm_communicator ( new_local_comm )
190 dims(1) = nest_pes_y(1) ! rows
191 dims(2) = nest_pes_x(1) ! columns
192 isperiodic(1) = .true.
193 isperiodic(2) = .true.
194 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_comm_per, ierr )
195 local_communicator_periodic_store(1) = local_comm_per
196 ! set all the domains' periodic communicators to this one <- kludge, 20151223, splitting domains won't work for periodic BCs
197 local_communicator_periodic_store = local_comm_per
198 local_communicator_periodic = local_comm_per
209 intercomm_active = .TRUE.
210 domain_active_this_task = .TRUE.
212 CALL nl_set_nproc_x ( 1, ntasks_x )
213 CALL nl_set_nproc_y ( 1, ntasks_y )
214 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
215 CALL wrf_message( wrf_err_message )
216 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
217 CALL MPI_BARRIER( local_communicator, ierr )
220 END SUBROUTINE wrf_dm_initialize
222 SUBROUTINE get_dm_max_halo_width( id, width )
224 INTEGER, INTENT(IN) :: id
225 INTEGER, INTENT(OUT) :: width
226 IF ( id .EQ. 1 ) THEN ! this is coarse domain
227 width = max_halo_width
229 width = max_halo_width + 3
232 END SUBROUTINE get_dm_max_halo_width
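! For example, with max_halo_width = 6 as set above, the coarse domain (id 1)
! reports a width of 6 and any nest reports 6 + 3 = 9.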
234 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
235 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
236 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
237 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
238 sp1x , ep1x , sm1x , em1x , &
239 sp2x , ep2x , sm2x , em2x , &
240 sp3x , ep3x , sm3x , em3x , &
241 sp1y , ep1y , sm1y , em1y , &
242 sp2y , ep2y , sm2y , em2y , &
243 sp3y , ep3y , sm3y , em3y , &
246 #if ( ( defined(SGIALTIX) || defined(FUJITSU_FX10) || defined(KEEP_INT_AROUND) ) && (! defined(MOVE_NESTS) ) )
247 USE module_domain, ONLY : domain, head_grid, find_grid_by_id, alloc_space_field
249 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
253 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
254 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
255 sm1 , em1 , sm2 , em2 , sm3 , em3
256 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
257 sm1x , em1x , sm2x , em2x , sm3x , em3x
258 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
259 sm1y , em1y , sm2y , em2y , sm3y , em3y
260 INTEGER, INTENT(IN) :: id, parent_id
261 TYPE(domain),POINTER :: parent
264 INTEGER :: ids, ide, jds, jde, kds, kde
265 INTEGER :: ims, ime, jms, jme, kms, kme
266 INTEGER :: ips, ipe, jps, jpe, kps, kpe
267 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
268 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
269 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
270 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
272 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
273 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
274 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
275 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
276 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
277 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
278 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
280 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
281 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
282 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
284 INTEGER :: idim , jdim , kdim , rem , a, b
285 INTEGER :: i, j, ni, nj, Px, Py, P
287 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
289 INTEGER :: idim_cd, jdim_cd, ierr
293 INTEGER :: e_we, e_sn
296 TYPE(domain), POINTER :: intermediate_grid
297 TYPE(domain), POINTER :: nest_grid
298 CHARACTER*256 :: mess
300 INTEGER parent_max_halo_width
301 INTEGER thisdomain_max_halo_width
305 CALL nl_get_lats_to_mic( 1, lats_to_mic )
306 IF ( lats_to_mic .GT. 0 ) THEN
307 minx = -99 ! code to task_for_point to do split decomposition over MIC and host
308 miny = lats_to_mic ! number of latitudes that should be assigned to MIC
316 SELECT CASE ( model_data_order )
317 ! need to finish other cases
318 CASE ( DATA_ORDER_ZXY )
319 ids = sd2 ; ide = ed2
320 jds = sd3 ; jde = ed3
321 kds = sd1 ; kde = ed1
322 CASE ( DATA_ORDER_XYZ )
323 ids = sd1 ; ide = ed1
324 jds = sd2 ; jde = ed2
325 kds = sd3 ; kde = ed3
326 CASE ( DATA_ORDER_XZY )
327 ids = sd1 ; ide = ed1
328 jds = sd3 ; jde = ed3
329 kds = sd2 ; kde = ed2
330 CASE ( DATA_ORDER_YXZ)
331 ids = sd2 ; ide = ed2
332 jds = sd1 ; jde = ed1
333 kds = sd3 ; kde = ed3
336 CALL nl_get_max_dom( 1 , max_dom )
338 CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
339 IF ( id .GT. 1 ) THEN
340 CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
343 CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
344 ids, ide, jds, jde, kds, kde, &
345 ims, ime, jms, jme, kms, kme, &
346 imsx, imex, jmsx, jmex, kmsx, kmex, &
347 imsy, imey, jmsy, jmey, kmsy, kmey, &
348 ips, ipe, jps, jpe, kps, kpe, &
349 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
350 ipsy, ipey, jpsy, jpey, kpsy, kpey )
352 ! ensure that every parent domain point has a full set of nested points under it,
353 ! even at the borders, by making sure the number of nest points is a multiple of
354 ! the nesting ratio. Note that this matters mostly for the intermediate domain, which
355 ! is the subject of the scatter/gather communications with the parent.
357 IF ( id .GT. 1 ) THEN
358 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
359 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
360 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
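! For example (illustrative numbers only), with parent_grid_ratio = 3 and
! ime = 38, mod(38,3) = 2, so ime is rounded up to 38 + 3 - 2 = 39, a multiple
! of the nesting ratio.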
363 SELECT CASE ( model_data_order )
364 CASE ( DATA_ORDER_ZXY )
365 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
366 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
367 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
368 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
369 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
370 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
371 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
372 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
373 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
374 CASE ( DATA_ORDER_ZYX )
375 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
376 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
377 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
378 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
379 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
380 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
381 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
382 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
383 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
384 CASE ( DATA_ORDER_XYZ )
385 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
386 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
387 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
388 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
389 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
390 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
391 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
392 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
393 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
394 CASE ( DATA_ORDER_YXZ)
395 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
396 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
397 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
398 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
399 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
400 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
401 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
402 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
403 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
404 CASE ( DATA_ORDER_XZY )
405 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
406 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
407 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
408 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
409 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
410 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
411 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
412 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
413 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
414 CASE ( DATA_ORDER_YZX )
415 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
416 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
417 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
418 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
419 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
420 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
421 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
422 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
423 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
427 WRITE(wrf_err_message,*)'*************************************'
428 CALL wrf_message( TRIM(wrf_err_message) )
429 WRITE(wrf_err_message,*)'Parent domain'
430 CALL wrf_message( TRIM(wrf_err_message) )
431 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
432 CALL wrf_message( TRIM(wrf_err_message) )
433 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
434 CALL wrf_message( TRIM(wrf_err_message) )
435 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
436 CALL wrf_message( TRIM(wrf_err_message) )
437 WRITE(wrf_err_message,*)'*************************************'
438 CALL wrf_message( TRIM(wrf_err_message) )
441 IF ( id .GT. 1 ) THEN
443 CALL nl_get_shw( id, shw )
444 CALL nl_get_i_parent_start( id , i_parent_start )
445 CALL nl_get_j_parent_start( id , j_parent_start )
446 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
448 SELECT CASE ( model_data_order )
449 CASE ( DATA_ORDER_ZXY )
453 c_kds = sd1 ; c_kde = ed1
454 CASE ( DATA_ORDER_ZYX )
458 c_kds = sd1 ; c_kde = ed1
459 CASE ( DATA_ORDER_XYZ )
463 c_kds = sd3 ; c_kde = ed3
464 CASE ( DATA_ORDER_YXZ)
468 c_kds = sd3 ; c_kde = ed3
469 CASE ( DATA_ORDER_XZY )
473 c_kds = sd2 ; c_kde = ed2
474 CASE ( DATA_ORDER_YZX )
478 c_kds = sd2 ; c_kde = ed2
481 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
482 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
484 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
485 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
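! Worked example with hypothetical numbers: if the nest i-extent idim is 100,
! parent_grid_ratio = 3 and shw = 2, then idim_cd = 100/3 + 1 + 2*2 + 1 = 39
! (integer division), and with i_parent_start = 10, c_ids = 10 - 2 = 8 and
! c_ide = 8 + 39 - 1 = 46.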
488 call nl_get_e_we( id -1, e_we )
489 call nl_get_e_sn( id -1, e_sn )
491 if ( c_ids .le. 0 ) c_ids = 1
492 if ( c_ide .gt. e_we) c_ide = e_we
493 if ( c_jds .le. 0 ) c_jds = 1
494 if ( c_jde .gt. e_sn) c_jde = e_sn
496 ! we want the intermediate domain to be decomposed the same way
497 ! as the underlying nest. So try this:
500 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
503 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
504 !jm CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
505 CALL task_for_point ( ni, nj, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id),Px,Py, &
507 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (a)')
508 IF ( Px .EQ. mytask_x ) THEN
510 IF ( c_ips .EQ. -1 ) c_ips = i
513 IF ( ierr .NE. 0 ) THEN
514 CALL tfp_message("<stdin>",__LINE__)
516 IF (c_ips .EQ. -1 ) THEN
522 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
525 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
526 ! CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
527 CALL task_for_point ( ni, nj, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
529 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (b)')
532 IF ( Py .EQ. mytask_y ) THEN
534 IF ( c_jps .EQ. -1 ) c_jps = j
537 IF ( ierr .NE. 0 ) THEN
538 CALL tfp_message("<stdin>",__LINE__)
540 IF (c_jps .EQ. -1 ) THEN
546 IF (c_ipe .EQ. -1 .or. c_jpe .EQ. -1) THEN
555 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
558 ! CALL task_for_point ( k, nj, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
559 CALL task_for_point ( k, nj, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
561 IF ( Px .EQ. mytask_x ) THEN
563 IF ( c_kpsx .EQ. -1 ) c_kpsx = k
566 IF ( ierr .NE. 0 ) THEN
567 CALL tfp_message("<stdin>",__LINE__)
569 IF (c_kpsx .EQ. -1 ) THEN
578 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
579 ! CALL task_for_point ( k, nj, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
580 CALL task_for_point ( k, nj, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
582 IF ( Py .EQ. mytask_y ) THEN
584 IF ( c_jpsx .EQ. -1 ) c_jpsx = j
587 IF ( ierr .NE. 0 ) THEN
588 CALL tfp_message("<stdin>",__LINE__)
590 IF (c_jpsx .EQ. -1 ) THEN
595 IF (c_ipex .EQ. -1 .or. c_jpex .EQ. -1) THEN
602 c_kpsy = c_kpsx ! same as above
603 c_kpey = c_kpex ! same as above
609 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
610 ! CALL task_for_point ( ni, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
611 CALL task_for_point ( ni, k, ids, ide, kds, kde, nest_pes_y(id), nest_pes_x(id), Py, Px, &
612 1, 1, ierr ) ! x and y for proc mesh reversed
613 IF ( Py .EQ. mytask_y ) THEN
615 IF ( c_ipsy .EQ. -1 ) c_ipsy = i
618 IF ( ierr .NE. 0 ) THEN
619 CALL tfp_message("<stdin>",__LINE__)
621 IF (c_ipsy .EQ. -1 ) THEN
628 IF ( c_ips <= c_ipe ) THEN
629 ! extend the patch dimensions out shw along edges of domain
630 IF ( mytask_x .EQ. 0 ) THEN
633 c_ipsy = c_ipsy - shw
636 ! IF ( mytask_x .EQ. ntasks_x-1 ) THEN
637 IF ( mytask_x .EQ. nest_pes_x(id)-1 ) THEN
640 c_ipey = c_ipey + shw
643 c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
644 c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
652 IF ( c_jps <= c_jpe ) THEN
653 ! extend the patch dimensions out shw along edges of domain
654 IF ( mytask_y .EQ. 0 ) THEN
657 c_jpsx = c_jpsx - shw
660 ! IF ( mytask_y .EQ. ntasks_y-1 ) THEN
661 IF ( mytask_y .EQ. nest_pes_y(id)-1 ) THEN
664 c_jpex = c_jpex + shw
667 c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdx ) - 1
668 c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdx ) + 1
679 ! Default initializations
680 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
681 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
689 IF ( c_kpsx .EQ. 0 .AND. c_kpex .EQ. -1 ) THEN
693 IF ( c_kpsy .EQ. 0 .AND. c_kpey .EQ. -1 ) THEN
702 IF ( c_ipsy .EQ. 0 .AND. c_ipey .EQ. -1 ) THEN
715 IF ( c_jpsx .EQ. 0 .AND. c_jpex .EQ. -1 ) THEN
752 WRITE(wrf_err_message,*)'*************************************'
753 CALL wrf_message( TRIM(wrf_err_message) )
754 WRITE(wrf_err_message,*)'Nesting domain'
755 CALL wrf_message( TRIM(wrf_err_message) )
756 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
757 CALL wrf_message( TRIM(wrf_err_message) )
758 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
759 CALL wrf_message( TRIM(wrf_err_message) )
760 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
761 CALL wrf_message( TRIM(wrf_err_message) )
762 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
763 CALL wrf_message( TRIM(wrf_err_message) )
764 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
765 CALL wrf_message( TRIM(wrf_err_message) )
766 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
767 CALL wrf_message( TRIM(wrf_err_message) )
768 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
769 CALL wrf_message( TRIM(wrf_err_message) )
770 WRITE(wrf_err_message,*)'*************************************'
771 CALL wrf_message( TRIM(wrf_err_message) )
773 SELECT CASE ( model_data_order )
774 CASE ( DATA_ORDER_ZXY )
775 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
776 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
777 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
778 CASE ( DATA_ORDER_ZYX )
779 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
780 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
781 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
782 CASE ( DATA_ORDER_XYZ )
783 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
784 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
785 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
786 CASE ( DATA_ORDER_YXZ)
787 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
788 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
789 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
790 CASE ( DATA_ORDER_XZY )
791 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
792 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
793 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
794 CASE ( DATA_ORDER_YZX )
795 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
796 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
797 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
800 ALLOCATE ( intermediate_grid )
801 ALLOCATE ( intermediate_grid%parents( max_parents ) )
802 ALLOCATE ( intermediate_grid%nests( max_nests ) )
803 intermediate_grid%allocated=.false.
804 NULLIFY( intermediate_grid%sibling )
806 NULLIFY( intermediate_grid%nests(i)%ptr )
808 NULLIFY (intermediate_grid%next)
809 NULLIFY (intermediate_grid%same_level)
810 NULLIFY (intermediate_grid%i_start)
811 NULLIFY (intermediate_grid%j_start)
812 NULLIFY (intermediate_grid%i_end)
813 NULLIFY (intermediate_grid%j_end)
814 intermediate_grid%id = id ! these must be the same. Other parts of code depend on it (see gen_comms.c)
815 intermediate_grid%num_nests = 0
816 intermediate_grid%num_siblings = 0
817 intermediate_grid%num_parents = 1
818 intermediate_grid%max_tiles = 0
819 intermediate_grid%num_tiles_spec = 0
820 #if ( EM_CORE == 1 && DA_CORE != 1 )
821 intermediate_grid%active_this_task = .true.
823 CALL find_grid_by_id ( id, head_grid, nest_grid )
825 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
826 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
827 intermediate_grid%num_parents = 1
829 intermediate_grid%is_intermediate = .TRUE.
830 SELECT CASE ( model_data_order )
831 CASE ( DATA_ORDER_ZXY )
832 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
833 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd33
834 CASE ( DATA_ORDER_ZYX )
835 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
836 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd32
837 CASE ( DATA_ORDER_XYZ )
838 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
839 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd32
840 CASE ( DATA_ORDER_YXZ)
841 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
842 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd31
843 CASE ( DATA_ORDER_XZY )
844 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
845 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd33
846 CASE ( DATA_ORDER_YZX )
847 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
848 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd31
850 intermediate_grid%nids = ids
851 intermediate_grid%nide = ide
852 intermediate_grid%njds = jds
853 intermediate_grid%njde = jde
855 intermediate_grid%sm31x = c_sm1x
856 intermediate_grid%em31x = c_em1x
857 intermediate_grid%sm32x = c_sm2x
858 intermediate_grid%em32x = c_em2x
859 intermediate_grid%sm33x = c_sm3x
860 intermediate_grid%em33x = c_em3x
861 intermediate_grid%sm31y = c_sm1y
862 intermediate_grid%em31y = c_em1y
863 intermediate_grid%sm32y = c_sm2y
864 intermediate_grid%em32y = c_em2y
865 intermediate_grid%sm33y = c_sm3y
866 intermediate_grid%em33y = c_em3y
869 intermediate_grid%sp31x = c_sp1x
870 intermediate_grid%ep31x = c_ep1x
871 intermediate_grid%sp32x = c_sp2x
872 intermediate_grid%ep32x = c_ep2x
873 intermediate_grid%sp33x = c_sp3x
874 intermediate_grid%ep33x = c_ep3x
875 intermediate_grid%sp31y = c_sp1y
876 intermediate_grid%ep31y = c_ep1y
877 intermediate_grid%sp32y = c_sp2y
878 intermediate_grid%ep32y = c_ep2y
879 intermediate_grid%sp33y = c_sp3y
880 intermediate_grid%ep33y = c_ep3y
883 #if ( ( defined(SGIALTIX) || defined(FUJITSU_FX10) || defined(KEEP_INT_AROUND) ) && (! defined(MOVE_NESTS) ) )
884 ! allocate space for the intermediate domain
885 ! CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., intercomm_active( intermediate_grid%id ), & ! use same id as nest
886 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., nest_grid%active_this_task, & ! use same id as nest
887 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
888 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
889 c_sp1, c_ep1, c_sp2, c_ep2, c_sp3, c_ep3, &
890 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
891 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
892 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
893 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
895 intermediate_grid%sd31 = c_sd1
896 intermediate_grid%ed31 = c_ed1
897 intermediate_grid%sp31 = c_sp1
898 intermediate_grid%ep31 = c_ep1
899 intermediate_grid%sm31 = c_sm1
900 intermediate_grid%em31 = c_em1
901 intermediate_grid%sd32 = c_sd2
902 intermediate_grid%ed32 = c_ed2
903 intermediate_grid%sp32 = c_sp2
904 intermediate_grid%ep32 = c_ep2
905 intermediate_grid%sm32 = c_sm2
906 intermediate_grid%em32 = c_em2
907 intermediate_grid%sd33 = c_sd3
908 intermediate_grid%ed33 = c_ed3
909 intermediate_grid%sp33 = c_sp3
910 intermediate_grid%ep33 = c_ep3
911 intermediate_grid%sm33 = c_sm3
912 intermediate_grid%em33 = c_em3
914 CALL med_add_config_info_to_grid ( intermediate_grid )
916 intermediate_grid%dx = parent%dx
917 intermediate_grid%dy = parent%dy
918 intermediate_grid%dt = parent%dt
922 END SUBROUTINE patch_domain_rsl_lite
924 SUBROUTINE compute_memory_dims_rsl_lite ( &
925 id , maxhalowidth , &
927 ids, ide, jds, jde, kds, kde, &
928 ims, ime, jms, jme, kms, kme, &
929 imsx, imex, jmsx, jmex, kmsx, kmex, &
930 imsy, imey, jmsy, jmey, kmsy, kmey, &
931 ips, ipe, jps, jpe, kps, kpe, &
932 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
933 ipsy, ipey, jpsy, jpey, kpsy, kpey )
936 INTEGER, INTENT(IN) :: id , maxhalowidth
937 INTEGER, INTENT(IN) :: shw, bdx, bdy
938 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
939 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
940 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
941 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
942 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
943 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
944 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
946 INTEGER Px, Py, P, i, j, k, ierr
954 ! CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
955 CALL task_for_point ( i, j, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
957 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (c)')
958 IF ( Px .EQ. mytask_x ) THEN
960 IF ( ips .EQ. -1 ) ips = i
963 IF ( ierr .NE. 0 ) THEN
964 CALL tfp_message("<stdin>",__LINE__)
966 ! handle setting the memory dimensions where there are no X elements assigned to this proc
967 IF (ips .EQ. -1 ) THEN
976 ! CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
977 CALL task_for_point ( i, j, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
979 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (d)')
980 IF ( Py .EQ. mytask_y ) THEN
982 IF ( jps .EQ. -1 ) jps = j
985 IF ( ierr .NE. 0 ) THEN
986 CALL tfp_message("<stdin>",__LINE__)
988 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
989 IF (jps .EQ. -1 ) THEN
994 !begin: wig; 12-Mar-2008
995 ! This appears redundant with the conditionals above, but we get cases with only
996 ! one of the directions being set to "missing" when turning off extra processors.
997 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
998 IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
1004 !end: wig; 12-Mar-2008
1007 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
1009 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
1010 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
1011 ! corresponds to grid dimension.
1015 ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
1018 ! +------------------+ <- this edge is costly; see below
1020 ! The aim is to avoid all-to-all communication over whole
1021 ! communicator. Instead, when possible, use a transpose scheme that requires
1022 ! all-to-all within dimensional communicators; that is, communicators
1023 ! defined for the processes in a row or column of the processor mesh. Note,
1024 ! however, it is not possible to create a ring of transposes between
1025 ! xy-yz-xz decompositions without at least one of the edges in the ring
1026 ! being fully all-to-all (in other words, one of the transpose edges must
1027 ! rotate and not just transpose a plane of the model grid within the
1028 ! processor mesh). The issue is then, where should we put this costly edge
1029 ! in the transpose scheme we chose? To avoid being completely arbitrary,
1030 ! we chose a scheme most natural for models that use parallel spectral
1031 ! transforms, where the costly edge is the one that goes from the xz to
1032 ! the xy decomposition. (May be implemented as just a two step transpose
1035 ! Additional notational convention, below. The 'x' or 'y' appended to the
1036 ! dimension start or end variable refers to which grid dimension is all
1037 ! on-processor in the given decomposition. That is ipsx and ipex are the
1038 ! start and end for the i-dimension in the zy decomposition where x is
1039 ! on-processor. ('z' is assumed for xy decomposition and not appended to
1040 ! the ips, ipe, etc. variable names).
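! Illustrative example of this notation (hypothetical sizes): on a 2 (X) by 3 (Y)
! task mesh with ids:ide = 1:90, jds:jde = 1:90, kds:kde = 1:30, a task in the
! default XxYy decomposition might own i = 1:45, j = 1:30 and all of k. After
! transposing to XzYy (the *x variables) that task holds all of i
! (ipsx:ipex = 1:90) with k and j decomposed, e.g. kpsx:kpex = 1:15 and
! jpsx:jpex = 1:30. In XzYx (the *y variables) all of j is on-processor
! (jpsy:jpey = 1:90), i is decomposed over the Y processor dimension, and k
! keeps the same X-dimension split (kpsy = kpsx, kpey = kpex, as noted below).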
1043 ! XzYy decomposition
1049 ! CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
1050 CALL task_for_point ( k, j, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
1052 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (e)')
1053 IF ( Px .EQ. mytask_x ) THEN
1055 IF ( kpsx .EQ. -1 ) kpsx = k
1058 IF ( ierr .NE. 0 ) THEN
1059 CALL tfp_message("<stdin>",__LINE__)
1062 ! handle the case where no levels are assigned to this process (the loop above performs
1063 ! no iterations). Do the same for I and J. Memory allocation is handled below.
1064 IF (kpsx .EQ. -1 ) THEN
1073 ! CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
1074 CALL task_for_point ( k, j, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
1076 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (f)')
1077 IF ( Py .EQ. mytask_y ) THEN
1079 IF ( jpsx .EQ. -1 ) jpsx = j
1082 IF ( ierr .NE. 0 ) THEN
1083 CALL tfp_message("<stdin>",__LINE__)
1085 IF (jpsx .EQ. -1 ) THEN
1090 !begin: wig; 12-Mar-2008
1091 ! This appears redundant with the conditionals above, but we get cases with only
1092 ! one of the directions being set to "missing" when turning off extra processors.
1093 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
1094 IF (jpex .EQ. -1) THEN
1100 !end: wig; 12-Mar-2008
1102 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
1104 kpsy = kpsx ! same as above
1105 kpey = kpex ! same as above
1111 ! CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
1112 CALL task_for_point ( i, k, ids, ide, kds, kde, nest_pes_y(id), nest_pes_x(id), Py, Px, &
1114 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (g)')
1115 IF ( Py .EQ. mytask_y ) THEN
1117 IF ( ipsy .EQ. -1 ) ipsy = i
1120 IF ( ierr .NE. 0 ) THEN
1121 CALL tfp_message("<stdin>",__LINE__)
1123 IF (ipsy .EQ. -1 ) THEN
1129 ! extend the patch dimensions out shw along edges of domain
1130 IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
1131 IF ( mytask_x .EQ. 0 ) THEN
1135 ! IF ( mytask_x .EQ. ntasks_x-1 ) THEN
1136 IF ( mytask_x .EQ. nest_pes_x(id)-1 ) THEN
1140 IF ( mytask_y .EQ. 0 ) THEN
1144 ! IF ( mytask_y .EQ. ntasks_y-1 ) THEN
1145 IF ( mytask_y .EQ. nest_pes_y(id)-1 ) THEN
1149 END IF !wig; 11-Mar-2008
1161 ! handle setting the memory dimensions where there are no levels assigned to this proc
1162 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
1166 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
1171 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
1175 ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
1176 ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
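! Worked example with hypothetical numbers: for ips = 31, ipe = 60, shw = 1,
! maxhalowidth = 6, ids = 1, ide = 100 and bdx = 1, the two lines above give
! ims = max(31-6, 1-1) - 1 = 24 and ime = min(60+6, 100+1) + 1 = 67.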
1177 #ifdef INTEL_ALIGN64
1178 ! align on 64 byte boundaries if -align array64byte
1180 ime = ime + (CHUNK-mod(ime-ims+1,CHUNK))
1187 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
1188 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
1196 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
1200 jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
1201 jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
1207 ! handle setting the memory dimensions where there are no X elements assigned to this proc
1208 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
1218 END SUBROUTINE compute_memory_dims_rsl_lite
1220 ! internal, used below for switching the datatype argument of MPI calls
1221 ! when REALs are being auto-promoted to doubles in the build of WRF
1222 INTEGER function getrealmpitype()
1225 INTEGER rtypesize, dtypesize, ierr
1226 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
1227 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
1228 IF ( RWORDSIZE .EQ. rtypesize ) THEN
1229 getrealmpitype = MPI_REAL
1230 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
1231 getrealmpitype = MPI_DOUBLE_PRECISION
1233 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
1236 ! required dummy initialization for function that is never called
1240 END FUNCTION getrealmpitype
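! Typical use of getrealmpitype (see the reduction wrappers below): it is passed
! as the datatype argument of MPI calls on default-REAL buffers, e.g.
!   CALL mpi_allreduce ( inval, retval, 1, getrealmpitype(), MPI_SUM, comm, ierr )
! so a build that promotes REAL to 8 bytes automatically uses MPI_DOUBLE_PRECISION.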
1242 INTEGER FUNCTION wrf_dm_max_int ( inval )
1245 INTEGER, intent(in) :: inval
1246 INTEGER :: ierr, retval
1247 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_MAX, local_communicator, ierr )
1248 wrf_dm_max_int = retval
1250 INTEGER, intent(in) :: inval
1251 wrf_dm_max_int = inval
1253 END FUNCTION wrf_dm_max_int
1255 REAL FUNCTION wrf_dm_max_real ( inval )
1260 CALL wrf_get_dm_communicator(comm)
1261 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, comm, ierr )
1262 wrf_dm_max_real = retval
1265 wrf_dm_max_real = inval
1267 END FUNCTION wrf_dm_max_real
1269 REAL FUNCTION wrf_dm_min_real ( inval )
1274 CALL wrf_get_dm_communicator(comm)
1275 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, comm, ierr )
1276 wrf_dm_min_real = retval
1279 wrf_dm_min_real = inval
1281 END FUNCTION wrf_dm_min_real
1283 SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
1290 CALL wrf_get_dm_communicator(comm)
1291 CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, comm, ierr )
1293 retval(1:n) = inval(1:n)
1295 END SUBROUTINE wrf_dm_min_reals
1297 FUNCTION wrf_dm_sum_real8 ( inval )
1300 REAL*8 inval, retval, wrf_dm_sum_real8
1302 CALL wrf_get_dm_communicator(comm)
1303 CALL mpi_allreduce ( inval, retval , 1, MPI_REAL8, MPI_SUM, comm, ierr )
1304 wrf_dm_sum_real8 = retval
1306 REAL*8 wrf_dm_sum_real8,inval
1307 wrf_dm_sum_real8 = inval
1309 END FUNCTION wrf_dm_sum_real8
1311 REAL FUNCTION wrf_dm_sum_real ( inval )
1316 CALL wrf_get_dm_communicator(comm)
1317 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, comm, ierr )
1318 wrf_dm_sum_real = retval
1321 wrf_dm_sum_real = inval
1323 END FUNCTION wrf_dm_sum_real
1325 SUBROUTINE wrf_dm_sum_reals (inval, retval)
1327 REAL, INTENT(IN) :: inval(:)
1328 REAL, INTENT(OUT) :: retval(:)
1331 CALL wrf_get_dm_communicator(comm)
1332 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, comm, ierr )
1336 END SUBROUTINE wrf_dm_sum_reals
1338 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
1341 INTEGER inval, retval
1343 CALL wrf_get_dm_communicator(comm)
1344 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, comm, ierr )
1345 wrf_dm_sum_integer = retval
1348 wrf_dm_sum_integer = inval
1350 END FUNCTION wrf_dm_sum_integer
1352 SUBROUTINE wrf_dm_sum_integers (inval, retval)
1354 INTEGER, INTENT(IN) :: inval(:)
1355 INTEGER, INTENT(OUT) :: retval(:)
1358 CALL wrf_get_dm_communicator(comm)
1359 CALL mpi_allreduce ( inval, retval, SIZE(inval), MPI_INTEGER, MPI_SUM, comm, ierr )
1363 END SUBROUTINE wrf_dm_sum_integers
1366 INTEGER FUNCTION wrf_dm_bxor_integer ( inval )
1369 INTEGER inval, retval
1371 CALL wrf_get_dm_communicator(comm)
1372 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_BXOR, comm, ierr )
1373 wrf_dm_bxor_integer = retval
1376 wrf_dm_bxor_integer = inval
1378 END FUNCTION wrf_dm_bxor_integer
1381 LOGICAL FUNCTION wrf_dm_lor_logical ( inval )
1384 LOGICAL inval, retval
1386 CALL wrf_get_dm_communicator(comm)
1387 CALL mpi_allreduce ( inval, retval , 1, MPI_LOGICAL, MPI_LOR, comm, ierr )
1388 wrf_dm_lor_logical = retval
1391 wrf_dm_lor_logical = inval
1393 END FUNCTION wrf_dm_lor_logical
1396 LOGICAL FUNCTION wrf_dm_land_logical ( inval )
1399 LOGICAL inval, retval
1401 CALL wrf_get_dm_communicator(comm)
1402 CALL mpi_allreduce ( inval, retval , 1, MPI_LOGICAL, MPI_LAND, comm, ierr )
1403 wrf_dm_land_logical = retval
1406 wrf_dm_land_logical = inval
1408 END FUNCTION wrf_dm_land_logical
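! Illustrative caller-side sketch for the reduction wrappers above (hypothetical
! variable names, not code from this module):
!   psum = wrf_dm_sum_real ( local_partial_sum )
!   pmax = wrf_dm_max_real ( local_max )
!   oops = wrf_dm_lor_logical ( local_error_flag )
! Each wrapper is collective over the current DM communicator, so every task in
! that communicator must make the matching call.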
1411 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
1415 INTEGER :: idex, jdex, i, comm
1416 INTEGER :: bcast(2),mrank
1417 REAL :: inreduce(2),outreduce(2)
1419 inreduce=(/ val, real(mytask) /)
1420 bcast=(/ idex,jdex /)
1421 CALL wrf_get_dm_communicator(comm)
1422 call MPI_Allreduce(inreduce,outreduce,1,MPI_2REAL,&
1426 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)   ! bcast holds the INTEGER indices idex,jdex
1432 INTEGER idex, jdex, ierr
1434 END SUBROUTINE wrf_dm_maxval_real
1436 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
1440 INTEGER :: idex, jdex, i, comm
1441 INTEGER :: bcast(2),mrank
1442 REAL :: inreduce(2),outreduce(2)
1444 inreduce=(/ val, real(mytask) /)
1445 bcast=(/ idex,jdex /)
1446 CALL wrf_get_dm_communicator(comm)
1447 call MPI_Allreduce(inreduce,outreduce,1,MPI_2REAL,&
1451 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)   ! bcast holds the INTEGER indices idex,jdex
1459 END SUBROUTINE wrf_dm_minval_real
1461 #ifndef PROMOTE_FLOAT
1462 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
1465 DOUBLE PRECISION val
1466 INTEGER :: idex, jdex, i, comm
1467 INTEGER :: bcast(2),mrank
1468 DOUBLE PRECISION :: inreduce(2),outreduce(2)
1470 inreduce=(/ val, dble(mytask) /)
1471 bcast=(/ idex,jdex /)
1472 CALL wrf_get_dm_communicator(comm)
1473 call MPI_Allreduce(inreduce,outreduce,1,MPI_2DOUBLE_PRECISION,&
1477 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)   ! bcast is INTEGER(2); MPI_DOUBLE_PRECISION would overrun the buffer
1482 DOUBLE PRECISION val
1483 INTEGER idex, jdex, ierr
1485 END SUBROUTINE wrf_dm_maxval_doubleprecision
1487 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1490 DOUBLE PRECISION val
1491 INTEGER :: idex, jdex, i, comm
1492 INTEGER :: bcast(2),mrank
1493 DOUBLE PRECISION :: inreduce(2),outreduce(2)
1495 inreduce=(/ val, dble(mytask) /)
1496 bcast=(/ idex,jdex /)
1497 CALL wrf_get_dm_communicator(comm)
1498 call MPI_Allreduce(inreduce,outreduce,1,MPI_2DOUBLE_PRECISION,&
1502 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)   ! bcast is INTEGER(2); MPI_DOUBLE_PRECISION would overrun the buffer
1507 DOUBLE PRECISION val
1508 INTEGER idex, jdex, ierr
1510 END SUBROUTINE wrf_dm_minval_doubleprecision
1513 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
1517 INTEGER :: idex, jdex, i, comm
1518 INTEGER :: bcast(2),mrank
1519 INTEGER :: inreduce(2),outreduce(2)
1521 inreduce=(/ val, mytask /)
1522 bcast=(/ idex,jdex /)
1523 CALL wrf_get_dm_communicator(comm)
1524 call MPI_Allreduce(inreduce,outreduce,1,MPI_2INTEGER,&
1528 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)
1534 INTEGER idex, jdex, ierr
1536 END SUBROUTINE wrf_dm_maxval_integer
1538 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1542 INTEGER :: idex, jdex, i, comm
1543 INTEGER :: bcast(2),mrank
1544 INTEGER :: inreduce(2),outreduce(2)
1546 inreduce=(/ val, mytask /)
1547 bcast=(/ idex,jdex /)
1548 CALL wrf_get_dm_communicator(comm)
1549 call MPI_Allreduce(inreduce,outreduce,1,MPI_2INTEGER,&
1553 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)
1559 INTEGER idex, jdex, ierr
1561 END SUBROUTINE wrf_dm_minval_integer
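! Illustrative use of the generic wrf_dm_maxval/wrf_dm_minval interfaces declared
! at the top of the module (hypothetical caller code): pass the local extreme and
! its local indices,
!   CALL wrf_dm_maxval ( local_max, i_of_max, j_of_max )
! and on return every task holds the global extreme and the indices reported by
! the task that owned it.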
1563 SUBROUTINE hwrf_coupler_init
1564 END SUBROUTINE hwrf_coupler_init
1566 SUBROUTINE split_communicator
1570 ! INTEGER mpi_comm_here, mpi_comm_local, comdup, comdup2, origmytask, mytask, ntasks, ierr, io_status
1571 INTEGER mpi_comm_here, mpi_comm_local, comdup, comdup2, origmytask, ierr, io_status
1572 INTEGER mpi_comm_me_and_mom
1574 INTEGER mytask_local,ntasks_local,num_compute_tasks
1575 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1576 INTEGER thread_support_provided, thread_support_requested
1578 INTEGER i, j, k, x, y, n_x, n_y
1580 INTEGER, ALLOCATABLE :: icolor(:),icolor2(:),idomain(:)
1583 ! Communicator definition Domains
1585 ! 6 pe Example Comm PEs (1)
1586 ! COMM_WORLD 0 1 2 3 4 5 / ! 1 0 1 2 3 4 5 (2) (3)
1591 ! Notes: 1. No requirement that any communicator be all tasks
1592 ! 2. A task may be a member of an arbitrary number
1593 ! of local communicators (But you may not want to do this)
1596 ! Namelist Split Settings (for 3 comms, 4 domains)
1597 ! Revised namelist semantics -- no need for binding nests to separately defined communicators
1599 ! (domain_id) 1 2 3 4
1601 ! comm_start 0 0 2 0
1602 ! nest_pes_x 2 1 2 1
1603 ! nest_pes_y 3 2 2 2
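! As a concrete (illustrative) rendering of the table above in namelist.input:
!   &dm_task_split
!    comm_start = 0, 0, 2, 0,
!    nest_pes_x = 2, 1, 2, 1,
!    nest_pes_y = 3, 2, 2, 2,
!   /
! (tasks_per_split is left at its default, which is the full task count.)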
1606 !! Namelist Split Settings (for 3 comms, 4 domains)
1607 !! (comm_id) 1 2 3 ...
1612 !! Domain definitions
1613 !! (domain_id) 1 2 3 4
1614 !! parent_id - 1 1 2
1615 !! comm_domain 1 2 3 2
1616 !! * nest_pes_x 2 1 2 1
1617 !! * nest_pes_y 3 2 2 2
1619 !! [* nest_pes_x is comm_pes_x(comm_domain(domain_id))]
1623 ! for parallel nesting, 201408, jm
1625 INTEGER :: intercomm
1626 INTEGER :: domain_id,par_id,nest_id,kid_id
1627 INTEGER :: mytask_me_and_mom, ntasks_me_and_mom, remote_leader
1628 LOGICAL :: inthisone
1629 LOGICAL :: mytask_is_nest, mytask_is_par,isperiodic(3)
1631 LOGICAL :: quilting_is_turned_off
1633 !!!!! needed to sneak-peek the registry to get parent_id
1634 ! define as temporaries
1635 #include "namelist_defines.inc"
1637 ! Statements that specify the namelists
1638 #include "namelist_statements.inc"
1640 CALL MPI_INITIALIZED( mpi_inited, ierr )
1641 IF ( .NOT. mpi_inited ) THEN
1642 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1643 thread_support_requested = MPI_THREAD_FUNNELED
1644 CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
1645 IF ( thread_support_provided .lt. thread_support_requested ) THEN
1646 CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
1648 mpi_comm_here = MPI_COMM_WORLD
1650 #if ( DA_CORE != 1 )
1651 IF ( coupler_on ) THEN
1652 CALL cpl_init( mpi_comm_here )
1655 CALL mpi_init ( ierr )
1656 mpi_comm_here = MPI_COMM_WORLD
1657 #if ( DA_CORE != 1 )
1661 CALL wrf_set_dm_communicator( mpi_comm_here )
1662 CALL wrf_termio_dup( mpi_comm_here )
1665 CALL wrf_set_dm_communicator( local_communicator )
1668 ! this should have been reset by init_module_wrf_quilt to be just the compute tasks
1669 CALL wrf_get_dm_communicator( mpi_comm_here )
1671 CALL MPI_Comm_rank ( mpi_comm_here, mytask_local, ierr ) ;
1672 CALL MPI_Comm_size ( mpi_comm_here, ntasks_local, ierr ) ;
1673 mpi_comm_allcompute = mpi_comm_here
1675 IF ( mytask_local .EQ. 0 ) THEN
1677 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1678 READ ( UNIT = 27 , NML = domains , IOSTAT=io_status )
1681 nio_tasks_per_group = 0
1682 poll_servers = .false.
1683 READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
1686 CALL mpi_bcast( nio_tasks_per_group , max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1687 CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1688 CALL mpi_bcast( max_dom, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1689 CALL mpi_bcast( parent_id, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1690 CALL quilting_disabled( quilting_is_turned_off )
1691 IF ( quilting_is_turned_off ) THEN
1693 nio_tasks_per_group = 0
1696 num_io_tasks = nio_tasks_per_group(1)*nio_groups
1698 CALL nl_set_max_dom(1,max_dom) ! quilting wants to see this too
1700 IF ( mytask_local .EQ. 0 ) THEN
1701 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1702 ! get a sneak peek at nproc_x and nproc_y
1705 READ ( 27 , NML = domains, IOSTAT=io_status )
1707 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1708 tasks_per_split = ntasks_local
1709 ! we need to sneak-peek the parent_id namelist setting, which is in the "domains" section
1710 ! of the namelist. That namelist is registry generated, so the registry-generated information
1711 ! is #included above.
1712 nest_pes_x = 0 ! dimensions of communicator in X and y
1714 IF ( nproc_x .EQ. -1 .OR. nproc_y .EQ. -1 ) THEN
1715 CALL compute_mesh( ntasks_local-num_io_tasks, n_x, n_y )
1720 comm_start = 0 ! make it so everyone will use same communicator if the dm_task_split namelist is not specified or is empty
1721 nest_pes_x(1:max_dom) = n_x
1722 nest_pes_y(1:max_dom) = n_y
1723 READ ( 27 , NML = dm_task_split, IOSTAT=io_status )
1726 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1727 IF ( io_status .NE. 0 ) THEN
1728 ! or if dm_task_split was read but was empty, do nothing: dm_task_split not specified, everyone uses the same communicator (see above)
1730 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1731 CALL mpi_bcast( nproc_x, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1732 CALL mpi_bcast( nproc_y, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1733 CALL mpi_bcast( comm_start, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1734 CALL mpi_bcast( nest_pes_x, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1735 CALL mpi_bcast( nest_pes_y, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1740 IF ( 1 .le. parent_id(i) .AND. parent_id(i) .LE. max_domains ) THEN
1741 which_kid(i) = nkids(parent_id(i))
1742 nkids(parent_id(i)) = nkids(parent_id(i)) + 1
1744 WRITE(wrf_err_message,*)'invalid parent id for domain ',i
1745 CALL wrf_error_fatal(TRIM(wrf_err_message))
1749 num_compute_tasks = -99
1750 DO nest_id = 1,max_dom
1751 IF ( nest_id .EQ. 1 ) THEN
1752 nest_task_offsets(nest_id) = comm_start(nest_id)
1754 IF ( comm_start(nest_id) .LT. comm_start(parent_id(nest_id)) ) THEN
1755 WRITE(wrf_err_message,&
1756 "('nest domain ',i3,'comm_start (',i3,') lt parent ',i3,' comm_start (',i3,')')") &
1757 nest_id,comm_start(nest_id),parent_id(nest_id),comm_start(parent_id(nest_id))
1758 CALL wrf_error_fatal(TRIM(wrf_err_message))
1759 ELSE IF ( comm_start(nest_id) .LT. &
1760 comm_start(parent_id(nest_id)) &
1761 +nest_pes_x(parent_id(nest_id))*nest_pes_y(parent_id(nest_id))) THEN
1762 nest_task_offsets(nest_id) = comm_start(nest_id)-comm_start(parent_id(nest_id))
1764 nest_task_offsets(nest_id) = nest_pes_x(parent_id(nest_id))*nest_pes_y(parent_id(nest_id))
1767 IF ((comm_start(nest_id)+nest_pes_x(nest_id)*nest_pes_y(nest_id)) .GT. num_compute_tasks ) THEN
1768 num_compute_tasks = (comm_start(nest_id)+nest_pes_x(nest_id)*nest_pes_y(nest_id))
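! Worked example using the illustrative settings in the comments above
! (parent_id(3) = 1, comm_start = 0,0,2,0, nest_pes_x = 2,1,2,1,
! nest_pes_y = 3,2,2,2): for nest_id = 3 the parent occupies tasks 0..5 and
! comm_start(3) = 2 falls inside that range, so nest_task_offsets(3) = 2 - 0 = 2,
! and num_compute_tasks is raised to at least comm_start(3) + 2*2 = 6.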
1773 !jm Additional code here to set up communicator for this domain and tables
1774 !jm mapping individual domain task IDs to the original local communicator
1775 !jm that is unsplit over nest domains. from now on what we are calling
1776 !jm local_communicator will be the communicator that is used by the local
1777 !jm nests. The communicator that spans all the nests will be renamed to
1778 !jm intercomm_communicator.
1779 !jm Design note: exploring the idea of using MPI intercommunicators. They
1780 !jm only work in pairs so we'd have a lot of intercommunicators to set up
1781 !jm and keep around. We'd also have to have additional communicator arguments
1782 !jm to all the nesting routines in and around the RSL nesting parts.
1783 CALL MPI_Comm_rank ( mpi_comm_here, mytask_local, ierr ) ;
1784 CALL MPI_Comm_rank ( mpi_comm_here, origmytask, ierr ) ;
1785 CALL mpi_comm_size ( mpi_comm_here, ntasks_local, ierr ) ;
1786 ALLOCATE( icolor(ntasks_local) )
1787 ALLOCATE( icolor2(ntasks_local) )
1788 ALLOCATE( idomain(ntasks_local) )
1790 ! split off the separate local communicators
1792 ! construct list of local communicators my task is in
1793 comms_i_am_in = MPI_UNDEFINED
1797 DO j = comm_start(i), comm_start(i)+nest_pes_x(i)*nest_pes_y(i)-1
1798 IF ( j+1 .GT. ntasks_local ) THEN
1799 WRITE(wrf_err_message,*)"check comm_start, nest_pes_x, nest_pes_y settings in namelist for comm ",i
1800 CALL wrf_error_fatal(wrf_err_message)
1804 IF ( icolor(mytask_local+1) .EQ. 1 ) inthisone = .TRUE.
1805 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1806 CALL MPI_Comm_split(comdup,icolor(mytask_local+1),mytask_local,mpi_comm_local,ierr)
1807 IF ( inthisone ) THEN
1808 dims(1) = nest_pes_y(i) ! rows
1809 dims(2) = nest_pes_x(i) ! columns
1810 isperiodic(1) = .false.
1811 isperiodic(2) = .false.
1812 CALL mpi_cart_create( mpi_comm_local, 2, dims, isperiodic, .false., comms_i_am_in(i), ierr )
1816 ! assign domains to communicators
1817 local_communicator = MPI_UNDEFINED
1818 CALL wrf_set_dm_quilt_comm( mpi_comm_here ) ! used by module_io_quilt_old.F
1820 local_communicator_store(i) = comms_i_am_in(i)
1821 domain_active_this_task(i) = ( local_communicator_store(i) .NE. MPI_UNDEFINED )
1822 IF ( local_communicator_store(i) .NE. MPI_UNDEFINED ) THEN
1823 CALL MPI_Comm_size( local_communicator_store(i), ntasks_store(i), ierr )
1824 CALL MPI_Comm_rank( local_communicator_store(i), mytask_store(i), ierr )
1825 CALL mpi_cart_coords( local_communicator_store(i), mytask_store(i), 2, coords, ierr )
1826 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_cart_coords fails ')
1827 mytask_y_store(i) = coords(1) ! row coordinate (y)
1828 mytask_x_store(i) = coords(2) ! column coordinate (x)
1829 CALL MPI_Comm_dup( local_communicator_store(i), comdup2, ierr )
1830 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails ')
1832 CALL MPI_Comm_split(comdup2,mytask_y_store(i),mytask_store(i),local_communicator_x_store(i),ierr)
1833 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y ')
1835 CALL MPI_Comm_split(comdup2,mytask_x_store(i),mytask_store(i),local_communicator_y_store(i),ierr)
1836 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x ')
1838 CALL MPI_Comm_size( local_communicator_x_store(i), ntasks_x_store(i), ierr )
1839 CALL MPI_Comm_rank( local_communicator_x_store(i), mytask_x_store(i), ierr )
1840 CALL MPI_Comm_size( local_communicator_y_store(i), ntasks_y_store(i), ierr )
1841 CALL MPI_Comm_rank( local_communicator_y_store(i), mytask_y_store(i), ierr )
1845 intercomm_active = .FALSE.
1846 ! iterate over parent-nest pairs
1847 ! split off a new communicator from the big one that includes the tasks from the parent and nest communicators
1848 ! starting with the parent tasks followed by the nest tasks
1849 ! if a task is in both (i.e. the communicators overlap) set the offset at the start of the first nest task
1850 ! in this way, we will handle cases where the parent and nest are decomposed over the same set of tasks
1851 ! (in that case, the offset would be the first task of the parent-nest communicator and that communicator)
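! Continuing the illustrative example above: for nest_id = 3 with parent domain 1,
! the parent tasks are 0..5 and the nest tasks are 2..5, so the overlapping
! "me and mom" communicator built below contains tasks 0..5 of the big communicator.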
1853 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1854 ntasks_local = num_compute_tasks
1855 DO nest_id = 2, max_dom
1856 par_id = parent_id(nest_id)
1858 DO j = 1,ntasks_local !iterate over all the tasks in the "big" communicator
1859 IF ( local_communicator_store( par_id ) .NE. MPI_UNDEFINED .OR. local_communicator_store( nest_id ) .NE. MPI_UNDEFINED ) icolor2(j)=1
1861 ! set mpi_comm_me_and_mom to be a communicator that has my parents tasks and mine
1863 mytask_is_nest = .FALSE.
1864 mytask_is_par = .FALSE.
1865 DO j = 1,ntasks_local
1867 IF ( comm_start(nest_id) .LE. j-1 .AND. j-1 .LT. comm_start(nest_id) + nest_pes_x(nest_id)*nest_pes_y(nest_id) ) THEN
1869 if ( j-1 .EQ. mytask_local ) mytask_is_nest=.TRUE.
1871 IF ( comm_start(par_id ) .LE. j-1 .AND. j-1 .LT. comm_start(par_id ) + nest_pes_x(par_id )*nest_pes_y(par_id ) ) THEN
1873 if ( j-1 .EQ. mytask_local ) mytask_is_par=.TRUE.
1877 i = icolor2(mytask_local+1)
1878 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1879 CALL MPI_Comm_split(comdup,i,origmytask,mpi_comm_me_and_mom,ierr)
1881 IF ( mytask_is_nest ) THEN
1882 intercomm_active(nest_id) = .TRUE.
1883 mpi_comm_to_mom(nest_id) = mpi_comm_me_and_mom
1885 IF ( mytask_is_par ) THEN
1886 intercomm_active(par_id) = .TRUE.
1887 mpi_comm_to_kid(which_kid(nest_id),par_id) = mpi_comm_me_and_mom
1890 DEALLOCATE( icolor )
1891 DEALLOCATE( icolor2 )
1892 DEALLOCATE( idomain )
1894 ELSE IF ( ( tasks_per_split .LE. ntasks_local .AND. tasks_per_split .GT. 0 ) ) THEN  ! tasks_per_split must be positive for the MOD below
1895 domain_active_this_task(1) = .TRUE.
1896 IF ( mod( ntasks_local, tasks_per_split ) .NE. 0 ) THEN
1897 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1900 ALLOCATE( icolor(ntasks_local) )
1902 DO WHILE ( j .LT. ntasks_local / tasks_per_split )
1903 DO i = 1, tasks_per_split
1904 icolor( i + j * tasks_per_split ) = j
1909 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1910 CALL MPI_Comm_split(comdup,icolor(mytask_local+1),mytask_local,mpi_comm_local,ierr)
1911 CALL wrf_set_dm_communicator( mpi_comm_local )
1912 CALL store_communicators_for_domain(1)
1913 DEALLOCATE( icolor )
1915 domain_active_this_task(1) = .TRUE.
1916 mpi_comm_local = mpi_comm_here
1917 CALL wrf_set_dm_communicator( mpi_comm_local )
1918 CALL store_communicators_for_domain(1)
1921 CALL instate_communicators_for_domain(1)
1924 ! for serial (non-MPI) builds
1926 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1927 INTEGER thread_support_provided, thread_support_requested
1929 INTEGER i, j, k, x, y, n_x, n_y
1932 ! for parallel nesting, 201408, jm
1934 INTEGER :: io_status
1935 INTEGER :: domain_id,par_id,nest_id,kid_id
1937 !!!!! needed to sneak a peek at namelist.input (using Registry-generated declarations) to get parent_id
1938 ! define as temporaries
1939 #include "namelist_defines.inc"
1941 ! Statements that specify the namelists
1942 #include "namelist_statements.inc"
1945 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1946 READ ( UNIT = 27 , NML = domains , IOSTAT=io_status )
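! Illustrative example of the entries this sneak-peek actually needs from the
! &domains group of namelist.input (values hypothetical; everything else in
! the group is ignored here):
!   &domains
!    max_dom   = 3,
!    parent_id = 1, 1, 2,
!   /
! which is enough to build which_kid and nkids below before the full
! configuration is read elsewhere.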
1952 IF ( 1 .le. parent_id(i) .AND. parent_id(i) .LE. max_domains ) THEN
1953 which_kid(i) = nkids(parent_id(i))
1954 nkids(parent_id(i)) = nkids(parent_id(i)) + 1
1956 WRITE(wrf_err_message,*)'invalid parent id for domain ',i
1957 CALL wrf_error_fatal(TRIM(wrf_err_message))
1961 intercomm_active = .TRUE.
1962 domain_active_this_task = .TRUE.
1983 CALL instate_communicators_for_domain(1)
1985 END SUBROUTINE split_communicator
1987 SUBROUTINE init_module_dm
1990 INTEGER mpi_comm_local, mpi_comm_here, ierr, mytask, nproc
1992 CALL mpi_initialized( mpi_inited, ierr )
1993 IF ( .NOT. mpi_inited ) THEN
1994 ! If MPI has not been initialized, then initialize it and
1995 ! make MPI_COMM_WORLD the communicator.
1996 ! Otherwise, something else (e.g. split_communicator) has already
1997 ! initialized MPI, so just grab the communicator that
1998 ! should already be stored and use that.
1999 CALL mpi_init ( ierr )
2000 mpi_comm_here = MPI_COMM_WORLD
2001 CALL wrf_set_dm_communicator ( mpi_comm_here )
2003 CALL wrf_get_dm_communicator( mpi_comm_local )
2005 END SUBROUTINE init_module_dm
2008 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
2009 USE module_domain, ONLY : domain
2011 TYPE (domain), INTENT(INOUT) :: parent, nest
2012 INTEGER, INTENT(IN) :: dx,dy
2014 END SUBROUTINE wrf_dm_move_nest
2016 !------------------------------------------------------------------------------
2017 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
2020 mp_local_cobmask, errf )
2022 !------------------------------------------------------------------------------
2023 ! PURPOSE: Do MPI allgatherv operation across processors to get the
2024 ! errors at each observation point on all processors.
2026 !------------------------------------------------------------------------------
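! The same three-step gather pattern is repeated below for the dot (U/V) and
! cross (mass) point fields; in sketch form, using the local names declared
! in this routine:
!   CALL MPI_ALLGATHER ( NLOCAL_DOT, 1, MPI_INTEGER, ICOUNT, 1, MPI_INTEGER, &
!                        MPI_COMM_COMP, IERR )
!   IDISPLACEMENT(1) = 0
!   DO I = 2, NPROCS
!     IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
!   END DO
!   CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, IFULL_BUFFER, &
!                        ICOUNT, IDISPLACEMENT, MPI_INTEGER, MPI_COMM_COMP, IERR )
! i.e. every rank first learns how many observations each rank holds locally,
! builds the displacement (prefix-sum) vector, then gathers the indices and
! the values themselves.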
2028 INTEGER, INTENT(IN) :: nsta ! Observation index.
2029 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
2030 INTEGER, INTENT(IN) :: niobf ! Number of observations.
2031 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
2032 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
2033 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
2034 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
2038 ! Local declarations
2039 integer i, n, nlocal_dot, nlocal_crs
2040 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
2041 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
2042 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
2043 REAL PBL_BUFFER(NIOBF) ! Buffer for holding (real) KPBL index
2044 REAL QATOB_BUFFER(NIOBF) ! Buffer for holding QV at the ob location
2045 INTEGER N_BUFFER(NIOBF)
2046 REAL FULL_BUFFER(NIOBF)
2047 INTEGER IFULL_BUFFER(NIOBF)
2048 INTEGER, ALLOCATABLE , DIMENSION(:) :: IDISPLACEMENT
2049 INTEGER, ALLOCATABLE , DIMENSION(:) :: ICOUNT
2051 INTEGER :: MPI_COMM_COMP ! MPI group communicator
2052 INTEGER :: NPROCS ! Number of processors
2053 INTEGER :: IERR ! Error code from MPI routines
2055 ! Get communicator for MPI operations.
2056 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
2058 ! Get rank of monitor processor and broadcast to others.
2059 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
2060 ALLOCATE (IDISPLACEMENT(NPROCS))
2061 ALLOCATE (ICOUNT(NPROCS))
2066 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
2067 NLOCAL_DOT = NLOCAL_DOT + 1
2068 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
2069 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
2070 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
2071 N_BUFFER(NLOCAL_DOT) = N
2074 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
2075 ICOUNT,1,MPI_INTEGER, &
2079 IDISPLACEMENT(1) = 0
2081 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2083 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
2084 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2085 MPI_INTEGER, MPI_COMM_COMP, IERR)
2087 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
2088 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2089 MPI_REAL, MPI_COMM_COMP, IERR)
2091 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2093 ! SURF PRESS AT U-POINTS
2094 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
2095 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2096 MPI_REAL, MPI_COMM_COMP, IERR)
2098 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2101 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
2102 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2103 MPI_REAL, MPI_COMM_COMP, IERR)
2105 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2111 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
2112 NLOCAL_DOT = NLOCAL_DOT + 1
2113 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
2114 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
2115 N_BUFFER(NLOCAL_DOT) = N
2118 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
2119 ICOUNT,1,MPI_INTEGER, &
2123 IDISPLACEMENT(1) = 0
2125 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2127 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
2128 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2129 MPI_INTEGER, MPI_COMM_COMP, IERR)
2131 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
2132 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2133 MPI_REAL, MPI_COMM_COMP, IERR)
2135 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2137 ! SURF PRESS AT V-POINTS
2138 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
2139 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2140 MPI_REAL, MPI_COMM_COMP, IERR)
2142 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2145 ! DO THE CROSS FIELDS, T AND Q
2148 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
2149 NLOCAL_CRS = NLOCAL_CRS + 1
2150 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
2151 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
2152 PBL_BUFFER(NLOCAL_CRS) = ERRF(5,N) ! KPBL
2153 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
2154 QATOB_BUFFER(NLOCAL_CRS) = ERRF(10,N) ! Model Mixing ratio itself (NOT ERROR)
2155 N_BUFFER(NLOCAL_CRS) = N
2158 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
2159 ICOUNT,1,MPI_INTEGER, &
2161 IDISPLACEMENT(1) = 0
2163 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2165 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
2166 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2167 MPI_INTEGER, MPI_COMM_COMP, IERR)
2169 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
2170 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2171 MPI_REAL, MPI_COMM_COMP, IERR)
2174 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2177 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
2178 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2179 MPI_REAL, MPI_COMM_COMP, IERR)
2181 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2184 CALL MPI_ALLGATHERV( PBL_BUFFER, NLOCAL_CRS, MPI_REAL, &
2185 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2186 MPI_REAL, MPI_COMM_COMP, IERR)
2188 ERRF(5,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2190 ! SURF PRESS AT MASS POINTS
2191 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
2192 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2193 MPI_REAL, MPI_COMM_COMP, IERR)
2195 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2198 ! Water vapor mixing ratio at the mass points (NOT THE ERROR)
2199 CALL MPI_ALLGATHERV( QATOB_BUFFER, NLOCAL_CRS, MPI_REAL, &
2200 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2201 MPI_REAL, MPI_COMM_COMP, IERR)
2203 ERRF(10,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2206 DEALLOCATE (IDISPLACEMENT)
2209 END SUBROUTINE get_full_obs_vector
2213 SUBROUTINE wrf_dm_maxtile_real ( val , tile)
2215 REAL val, val_all( ntasks )
2220 ! Collective operation. Each processor calls passing a local value and its index; on return
2221 ! all processors are passed back the maximum of all values passed and its tile number.
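! Hedged usage sketch (illustrative, not a call site from this file):
!   rmax = local_cfl_estimate     ! hypothetical per-task quantity
!   CALL wrf_dm_maxtile_real( rmax, itile )
!   ! on return every task holds the global maximum in rmax and, in itile,
!   ! the (1-based) index of the task that supplied it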
2227 CALL wrf_get_dm_communicator ( comm )
2228 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
2232 IF ( val_all(i) .GT. val ) THEN
2238 END SUBROUTINE wrf_dm_maxtile_real
2241 SUBROUTINE wrf_dm_mintile_real ( val , tile)
2243 REAL val, val_all( ntasks )
2248 ! Collective operation. Each processor calls passing a local value and its index; on return
2249 ! all processors are passed back the minimum of all values passed and its tile number.
2255 CALL wrf_get_dm_communicator ( comm )
2256 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
2260 IF ( val_all(i) .LT. val ) THEN
2266 END SUBROUTINE wrf_dm_mintile_real
2269 SUBROUTINE wrf_dm_mintile_double ( val , tile)
2271 DOUBLE PRECISION val, val_all( ntasks )
2276 ! Collective operation. Each processor calls passing a local value and its index; on return
2277 ! all processors are passed back the minimum of all values passed and its tile number.
2283 CALL wrf_get_dm_communicator ( comm )
2284 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
2288 IF ( val_all(i) .LT. val ) THEN
2294 END SUBROUTINE wrf_dm_mintile_double
2297 SUBROUTINE wrf_dm_tile_val_int ( val , tile)
2299 INTEGER val, val_all( ntasks )
2304 ! Collective operation. Get value from input tile.
2310 CALL wrf_get_dm_communicator ( comm )
2311 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
2314 END SUBROUTINE wrf_dm_tile_val_int
2316 SUBROUTINE wrf_get_hostname ( str )
2320 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
2325 END SUBROUTINE wrf_get_hostname
2327 SUBROUTINE wrf_get_hostid ( hostid )
2330 INTEGER i, sz, n, cs
2331 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
2334 END SUBROUTINE wrf_get_hostid
2336 END MODULE module_dm
2339 SUBROUTINE push_communicators_for_domain( id )
2341 INTEGER, INTENT(IN) :: id ! if specified also does an instate for grid id
2342 ! Only required for distributed memory parallel runs
2343 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2344 IF ( communicator_stack_cursor .GE. max_domains ) CALL wrf_error_fatal("push_communicators_for_domain would exceed stacksize")
2345 communicator_stack_cursor = communicator_stack_cursor + 1
2347 id_stack(communicator_stack_cursor) = current_id
2348 local_communicator_stack( communicator_stack_cursor ) = local_communicator
2349 local_communicator_periodic_stack( communicator_stack_cursor ) = local_communicator_periodic
2350 local_iocommunicator_stack( communicator_stack_cursor ) = local_iocommunicator
2351 local_communicator_x_stack( communicator_stack_cursor ) = local_communicator_x
2352 local_communicator_y_stack( communicator_stack_cursor ) = local_communicator_y
2353 ntasks_stack( communicator_stack_cursor ) = ntasks
2354 ntasks_y_stack( communicator_stack_cursor ) = ntasks_y
2355 ntasks_x_stack( communicator_stack_cursor ) = ntasks_x
2356 mytask_stack( communicator_stack_cursor ) = mytask
2357 mytask_x_stack( communicator_stack_cursor ) = mytask_x
2358 mytask_y_stack( communicator_stack_cursor ) = mytask_y
2360 CALL instate_communicators_for_domain( id )
2362 END SUBROUTINE push_communicators_for_domain
2363 SUBROUTINE pop_communicators_for_domain
2366 ! Only required for distributed memory parallel runs
2367 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2368 IF ( communicator_stack_cursor .LT. 1 ) CALL wrf_error_fatal("pop_communicators_for_domain on empty stack")
2369 current_id = id_stack(communicator_stack_cursor)
2370 local_communicator = local_communicator_stack( communicator_stack_cursor )
2371 local_communicator_periodic = local_communicator_periodic_stack( communicator_stack_cursor )
2372 local_iocommunicator = local_iocommunicator_stack( communicator_stack_cursor )
2373 local_communicator_x = local_communicator_x_stack( communicator_stack_cursor )
2374 local_communicator_y = local_communicator_y_stack( communicator_stack_cursor )
2375 ntasks = ntasks_stack( communicator_stack_cursor )
2376 ntasks_y = ntasks_y_stack( communicator_stack_cursor )
2377 ntasks_x = ntasks_x_stack( communicator_stack_cursor )
2378 mytask = mytask_stack( communicator_stack_cursor )
2379 mytask_x = mytask_x_stack( communicator_stack_cursor )
2380 mytask_y = mytask_y_stack( communicator_stack_cursor )
2381 communicator_stack_cursor = communicator_stack_cursor - 1
2383 END SUBROUTINE pop_communicators_for_domain
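! Hedged usage sketch (illustrative): callers bracket work on a particular grid
! so the module-level communicator state is restored afterwards, exactly as
! wrf_dm_patch_domain does later in this file:
!   CALL push_communicators_for_domain( grid%id )  ! saves state, instates grid%id
!   ! ... halo exchanges / collective operations for this grid ...
!   CALL pop_communicators_for_domain              ! restores the previous state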
2384 SUBROUTINE instate_communicators_for_domain( id )
2386 ! Only required for distributed memory parallel runs
2387 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2389 INTEGER, INTENT(IN) :: id
2392 local_communicator = local_communicator_store( id )
2393 local_communicator_periodic = local_communicator_periodic_store( id )
2394 local_iocommunicator = local_iocommunicator_store( id )
2395 local_communicator_x = local_communicator_x_store( id )
2396 local_communicator_y = local_communicator_y_store( id )
2397 ntasks = ntasks_store( id )
2398 mytask = mytask_store( id )
2399 ntasks_x = ntasks_x_store( id )
2400 ntasks_y = ntasks_y_store( id )
2401 mytask_x = mytask_x_store( id )
2402 mytask_y = mytask_y_store( id )
2404 END SUBROUTINE instate_communicators_for_domain
2405 SUBROUTINE store_communicators_for_domain( id )
2407 ! Only required for distributed memory parallel runs
2408 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2410 INTEGER, INTENT(IN) :: id
2411 local_communicator_store( id ) = local_communicator
2412 local_communicator_periodic_store( id ) = local_communicator_periodic
2413 local_iocommunicator_store( id ) = local_iocommunicator
2414 local_communicator_x_store( id ) = local_communicator_x
2415 local_communicator_y_store( id ) = local_communicator_y
2416 ntasks_store( id ) = ntasks
2417 ntasks_x_store( id ) = ntasks_x
2418 ntasks_y_store( id ) = ntasks_y
2419 mytask_store( id ) = mytask
2420 mytask_x_store( id ) = mytask_x
2421 mytask_y_store( id ) = mytask_y
2423 END SUBROUTINE store_communicators_for_domain
2425 !=========================================================================
2426 ! wrf_dm_patch_domain has to be outside the module because it is called
2427 ! by a routine in module_domain but itself depends on module_domain (a circular USE otherwise)
2429 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
2430 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
2431 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
2432 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
2433 sp1x , ep1x , sm1x , em1x , &
2434 sp2x , ep2x , sm2x , em2x , &
2435 sp3x , ep3x , sm3x , em3x , &
2436 sp1y , ep1y , sm1y , em1y , &
2437 sp2y , ep2y , sm2y , em2y , &
2438 sp3y , ep3y , sm3y , em3y , &
2440 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
2441 USE module_dm, ONLY : patch_domain_rsl_lite !, push_communicators_for_domain, pop_communicators_for_domain
2444 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
2445 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
2446 sm1 , em1 , sm2 , em2 , sm3 , em3
2447 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
2448 sm1x , em1x , sm2x , em2x , sm3x , em3x
2449 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
2450 sm1y , em1y , sm2y , em2y , sm3y , em3y
2451 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
2453 TYPE(domain), POINTER :: parent
2454 TYPE(domain), POINTER :: grid_ptr
2456 ! This indirection is necessary because we cannot pass parent directly into
2457 ! wrf_dm_patch_domain: creating the correct interface definitions
2458 ! would generate a circular USE reference between module_domain and module_dm.
2459 ! See the comment of this date in module_domain for more information. JM 20020416
2462 grid_ptr => head_grid
2463 CALL find_grid_by_id( parent_id , grid_ptr , parent )
2465 CALL push_communicators_for_domain(id)
2467 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
2468 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
2469 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
2470 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
2471 sp1x , ep1x , sm1x , em1x , &
2472 sp2x , ep2x , sm2x , em2x , &
2473 sp3x , ep3x , sm3x , em3x , &
2474 sp1y , ep1y , sm1y , em1y , &
2475 sp2y , ep2y , sm2y , em2y , &
2476 sp3y , ep3y , sm3y , em3y , &
2479 CALL pop_communicators_for_domain
2482 END SUBROUTINE wrf_dm_patch_domain
2484 SUBROUTINE wrf_termio_dup( comm )
2486 INTEGER, INTENT(IN) :: comm
2487 INTEGER mytask, ntasks
2491 CALL mpi_comm_size(comm, ntasks, ierr )
2492 CALL mpi_comm_rank(comm, mytask, ierr )
2493 write(0,*)'starting wrf task ',mytask,' of ',ntasks
2494 CALL rsl_error_dup1( mytask, ntasks )
2499 END SUBROUTINE wrf_termio_dup
2501 SUBROUTINE wrf_get_myproc( myproc )
2502 USE module_dm , ONLY : mytask
2507 END SUBROUTINE wrf_get_myproc
2509 SUBROUTINE wrf_get_nproc( nproc )
2510 USE module_dm , ONLY : ntasks
2515 END SUBROUTINE wrf_get_nproc
2517 SUBROUTINE wrf_get_nprocx( nprocx )
2518 USE module_dm , ONLY : ntasks_x
2523 END SUBROUTINE wrf_get_nprocx
2525 SUBROUTINE wrf_get_nprocy( nprocy )
2526 USE module_dm , ONLY : ntasks_y
2531 END SUBROUTINE wrf_get_nprocy
2533 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
2534 USE module_dm , ONLY : local_communicator
2543 CHARACTER*1 BUF(size)
2546 CALL BYTE_BCAST ( buf , size, local_communicator )
2549 END SUBROUTINE wrf_dm_bcast_bytes
2551 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
2555 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
2560 INTEGER ibuf(256),i,n
2563 ! The root task is required to have the correct value of N1; other tasks
2564 ! might not have the correct value.
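! Hedged usage sketch (illustrative; the file name is hypothetical):
!   CHARACTER(LEN=256) :: fname
!   INTEGER :: n
!   n = 0
!   IF ( wrf_dm_on_monitor() ) THEN
!     fname = 'wrfinput_d01'       ! meaningful on the root task only
!     n = LEN_TRIM(fname)
!   END IF
!   CALL wrf_dm_bcast_string( fname, n )
! after which every task holds the root's string (truncated at 256 characters).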
2565 CALL wrf_dm_bcast_integer( n , 1 )
2566 IF (n .GT. 256) n = 256
2569 ibuf(I) = ichar(buf(I:I))
2571 CALL wrf_dm_bcast_integer( ibuf, n )
2574 buf(i:i) = char(ibuf(i))
2579 END SUBROUTINE wrf_dm_bcast_string
2581 SUBROUTINE wrf_dm_bcast_string_comm( BUF, N1, COMM )
2586 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
2591 INTEGER ibuf(256),i,n
2594 ! The root task is required to have the correct value of N1; other tasks
2595 ! might not have the correct value.
2596 CALL BYTE_BCAST( n, IWORDSIZE, COMM )
2597 IF (n .GT. 256) n = 256
2600 ibuf(I) = ichar(buf(I:I))
2602 CALL BYTE_BCAST( ibuf, N*IWORDSIZE, COMM )
2605 buf(i:i) = char(ibuf(i))
2610 END SUBROUTINE wrf_dm_bcast_string_comm
2612 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
2616 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
2618 END SUBROUTINE wrf_dm_bcast_integer
2620 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
2623 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
2624 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
2625 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv
2626 ! if we were not indexing the globbuf and Field arrays it would not even matter
2628 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
2630 END SUBROUTINE wrf_dm_bcast_double
2632 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
2636 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
2638 END SUBROUTINE wrf_dm_bcast_real
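! The typed bcast wrappers (integer/double/real/logical) simply scale the
! element count by the matching word size and defer to wrf_dm_bcast_bytes over
! local_communicator.  Hedged usage sketch (illustrative values):
!   REAL    :: cen_lat          ! hypothetical scalar set on the root task
!   INTEGER :: nvals(3)
!   CALL wrf_dm_bcast_real   ( cen_lat, 1 )
!   CALL wrf_dm_bcast_integer( nvals,   3 )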
2640 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
2644 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
2646 END SUBROUTINE wrf_dm_bcast_logical
2648 SUBROUTINE write_68( grid, v , s , &
2649 ids, ide, jds, jde, kds, kde, &
2650 ims, ime, jms, jme, kms, kme, &
2651 its, ite, jts, jte, kts, kte )
2652 USE module_domain, ONLY : domain
2654 TYPE(domain) , INTENT (INOUT) :: grid
2656 INTEGER ids, ide, jds, jde, kds, kde, &
2657 ims, ime, jms, jme, kms, kme, &
2658 its, ite, jts, jte, kts, kte
2659 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
2663 logical, external :: wrf_dm_on_monitor
2664 real globbuf( ids:ide, kds:kde, jds:jde )
2665 character*3 ord, stag
2667 if ( kds == kde ) then
2670 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
2671 ids, ide, jds, jde, kds, kde, &
2672 ims, ime, jms, jme, kms, kme, &
2673 its, ite, jts, jte, kts, kte )
2678 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
2679 ids, ide, kds, kde, jds, jde, &
2680 ims, ime, kms, kme, jms, jme, &
2681 its, ite, kts, kte, jts, jte )
2685 if ( wrf_dm_on_monitor() ) THEN
2686 WRITE(68,*) ide-ids+1, jde-jds+1 , s
2689 WRITE(68,*) globbuf(i,1,j)
2697 SUBROUTINE wrf_abort
2699 #if ( DA_CORE != 1 )
2700 USE module_cpl, ONLY : coupler_on, cpl_abort
2707 #if ( DA_CORE != 1 )
2708 IF ( coupler_on ) THEN
2709 CALL cpl_abort( 'wrf_abort', 'look for abort message in rsl* files' )
2712 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
2713 #if ( DA_CORE != 1 )
2719 END SUBROUTINE wrf_abort
2721 SUBROUTINE wrf_dm_shutdown
2725 CALL MPI_FINALIZE( ierr )
2728 END SUBROUTINE wrf_dm_shutdown
2730 LOGICAL FUNCTION wrf_dm_on_monitor()
2734 INTEGER tsk, ierr, mpi_comm_local
2735 CALL wrf_get_dm_communicator( mpi_comm_local )
2736 IF ( mpi_comm_local .NE. MPI_UNDEFINED ) THEN
2737 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
2738 wrf_dm_on_monitor = tsk .EQ. 0
2740 wrf_dm_on_monitor = .FALSE.
2743 wrf_dm_on_monitor = .TRUE.
2746 END FUNCTION wrf_dm_on_monitor
2748 SUBROUTINE rsl_comm_iter_init(shw,ps,pe)
2750 INTEGER iter, plus_send_start, plus_recv_start, &
2751 minus_send_start, minus_recv_start
2752 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
2753 minus_send_start, minus_recv_start
2755 minus_send_start = ps
2756 minus_recv_start = ps-1
2757 plus_send_start = pe
2758 plus_recv_start = pe+1
2759 END SUBROUTINE rsl_comm_iter_init
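! Hedged sketch of the intended call pattern (illustrative; the real call sites
! are in the registry-generated halo/comm code, and the exact arguments depend
! on the direction being exchanged -- xy=0 selects the J direction here):
!   CALL rsl_comm_iter_init( shw, jps, jpe )
!   DO WHILE ( rsl_comm_iter( grid%id, .FALSE., shw, 0, jds, jde, jps, jpe,   &
!                             njds, njde,                                     &
!                             sendbeg_m, sendw_m, sendbeg_p, sendw_p,         &
!                             recvbeg_m, recvw_m, recvbeg_p, recvw_p ) )
!     ! ... pack, exchange, and unpack one halo strip using the widths above ...
!   END DO
! Successive passes reach one task further out until the full halo width shw
! has been covered.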
2761 LOGICAL FUNCTION rsl_comm_iter ( id , is_intermediate, &
2762 shw , xy , ds, de_in, ps, pe, nds,nde, &
2763 sendbeg_m, sendw_m, sendbeg_p, sendw_p, &
2764 recvbeg_m, recvw_m, recvbeg_p, recvw_p )
2765 USE module_dm, ONLY : ntasks_x, ntasks_y, mytask_x, mytask_y, minx, miny, &
2766 nest_pes_x, nest_pes_y
2768 INTEGER, INTENT(IN) :: id,shw,xy,ds,de_in,ps,pe,nds,nde
2769 LOGICAL, INTENT(IN) :: is_intermediate ! treated differently, coarse but with same decomp as nest
2770 INTEGER, INTENT(OUT) :: sendbeg_m, sendw_m, sendbeg_p, sendw_p
2771 INTEGER, INTENT(OUT) :: recvbeg_m, recvw_m, recvbeg_p, recvw_p
2772 INTEGER k, kn, ni, nj, de, Px, Py, nt, ntx, nty, me, lb, ub, ierr
2775 INTEGER iter, plus_send_start, plus_recv_start, &
2776 minus_send_start, minus_recv_start
2777 INTEGER parent_grid_ratio, parent_start
2778 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
2779 minus_send_start, minus_recv_start
2782 ntx = nest_pes_x(id)
2783 nty = nest_pes_y(id)
2784 IF ( xy .EQ. 1 ) THEN ! X/I axis
2787 dum = 2 * nty ! dummy dimension length so task_for_point can decompose without a divide by zero
2788 IF ( is_intermediate ) THEN
2789 CALL nl_get_i_parent_start(id,parent_start)
2790 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2795 dum = 2 * ntx ! dummy dimension length so task_for_point can decompose without a divide by zero
2796 IF ( is_intermediate ) THEN
2797 CALL nl_get_j_parent_start(id,parent_start)
2798 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2808 IF ( me .GT. 0 ) THEN
2809 lb = minus_send_start
2813 IF ( xy .eq. 1 ) THEN
2814 IF ( is_intermediate ) THEN
2815 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2816 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2817 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (h)')
2819 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2820 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (i)')
2822 IF ( Px .NE. me+(iter-1) ) THEN
2826 IF ( is_intermediate ) THEN
2827 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2828 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2829 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (h)')
2831 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2832 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (i)')
2834 IF ( Py .NE. me+(iter-1) ) THEN
2838 minus_send_start = minus_send_start+1
2839 sendw_m = sendw_m + 1
2845 IF ( me .GT. 0 ) THEN
2846 ub = minus_recv_start
2848 DO k = minus_recv_start,ps-shw,-1
2850 IF ( xy .eq. 1 ) THEN
2851 IF ( is_intermediate ) THEN
2852 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2853 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2854 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (j)')
2856 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2857 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (k)')
2859 IF ( Px .NE. me-iter ) THEN
2863 IF ( is_intermediate ) THEN
2864 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2865 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2866 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (j)')
2868 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2869 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (k)')
2871 IF ( Py .NE. me-iter ) THEN
2875 minus_recv_start = minus_recv_start-1
2876 recvw_m = recvw_m + 1
2883 IF ( ( xy .eq. 1 .and. me .LT. ntx-1 ) .OR. ( xy .eq. 0 .and. me .LT. nty-1 ) ) THEN
2884 ub = plus_send_start
2885 sendbeg_p = pe - ub + 1
2886 DO k = ub,pe-shw+1,-1
2888 IF ( xy .eq. 1 ) THEN
2889 IF ( is_intermediate ) THEN
2890 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2891 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2892 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (l)')
2894 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2895 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (m)')
2897 IF ( Px .NE. me-(iter-1) ) THEN
2901 IF ( is_intermediate ) THEN
2902 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2903 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2904 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (l)')
2906 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2907 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (m)')
2909 IF ( Py .NE. me-(iter-1) ) THEN
2913 plus_send_start = plus_send_start - 1
2914 sendw_p = sendw_p + 1
2920 IF ( ( xy .eq. 1 .and. me .LT. ntx-1 ) .OR. ( xy .eq. 0 .and. me .LT. nty-1 ) ) THEN
2921 lb = plus_recv_start
2925 IF ( xy .eq. 1 ) THEN
2926 IF ( is_intermediate ) THEN
2927 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2928 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2929 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (n)')
2932 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2934 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (o)')
2936 IF ( Px .NE. me+iter ) THEN
2940 IF ( is_intermediate ) THEN
2941 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2942 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2943 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (n)')
2945 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2946 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (o)')
2948 IF ( Py .NE. me+iter ) THEN
2952 plus_recv_start = plus_recv_start + 1
2953 recvw_p = recvw_p + 1
2957 if ( iter .eq. 1 ) then
2962 sendw_m = 0 ; sendw_p = 0 ; recvw_m = 0 ; recvw_p = 0
2963 sendbeg_m = 1 ; if ( me .GT. 0 ) sendw_m = shw ;
2964 sendbeg_p = 1 ; if ( me .LT. nt-1 ) sendw_p = shw
2965 recvbeg_m = 1 ; if ( me .GT. 0 ) recvw_m = shw ;
2966 recvbeg_p = 1 ; if ( me .LT. nt-1 ) recvw_p = shw ;
2968 ! write(0,*)'shw ', shw , ' xy ',xy
2969 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2970 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2971 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2974 ! write(0,*)'shw ', shw , ' xy ',xy,' plus_send_start ',plus_send_start,' minus_send_start ', minus_send_start
2975 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2976 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2977 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2979 rsl_comm_iter = went
2980 END FUNCTION rsl_comm_iter
2982 INTEGER FUNCTION wrf_dm_monitor_rank()
2984 wrf_dm_monitor_rank = 0
2986 END FUNCTION wrf_dm_monitor_rank
2988 ! return the global communicator if id <= 0
2989 SUBROUTINE wrf_get_dm_communicator_for_id ( id, communicator )
2990 USE module_dm , ONLY : local_communicator_store, mpi_comm_allcompute
2992 INTEGER , INTENT(IN) :: id
2993 INTEGER , INTENT(OUT) :: communicator
2994 IF ( id .le. 0 ) THEN
2995 communicator = mpi_comm_allcompute
2997 communicator = local_communicator_store(id)
3000 END SUBROUTINE wrf_get_dm_communicator_for_id
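! Hedged usage sketch (illustrative): I/O and coupling layers that need the
! communicator for a particular grid, or for all compute tasks when no grid
! applies, can do e.g.
!   INTEGER :: comm
!   CALL wrf_get_dm_communicator_for_id( grid%id, comm )  ! this grid's tasks
!   CALL wrf_get_dm_communicator_for_id( 0, comm )        ! id <= 0: all compute tasks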
3002 SUBROUTINE wrf_get_dm_communicator ( communicator )
3003 USE module_dm , ONLY : local_communicator
3005 INTEGER , INTENT(OUT) :: communicator
3006 communicator = local_communicator
3008 END SUBROUTINE wrf_get_dm_communicator
3010 SUBROUTINE wrf_get_dm_communicator_x ( communicator )
3011 USE module_dm , ONLY : local_communicator_x
3013 INTEGER , INTENT(OUT) :: communicator
3014 communicator = local_communicator_x
3016 END SUBROUTINE wrf_get_dm_communicator_x
3018 SUBROUTINE wrf_get_dm_communicator_y ( communicator )
3019 USE module_dm , ONLY : local_communicator_y
3021 INTEGER , INTENT(OUT) :: communicator
3022 communicator = local_communicator_y
3024 END SUBROUTINE wrf_get_dm_communicator_y
3026 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
3027 USE module_dm , ONLY : local_iocommunicator
3029 INTEGER , INTENT(OUT) :: iocommunicator
3030 iocommunicator = local_iocommunicator
3032 END SUBROUTINE wrf_get_dm_iocommunicator
3034 SUBROUTINE wrf_set_dm_communicator ( communicator )
3035 USE module_dm , ONLY : local_communicator
3037 INTEGER , INTENT(IN) :: communicator
3038 local_communicator = communicator
3040 END SUBROUTINE wrf_set_dm_communicator
3042 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
3043 USE module_dm , ONLY : local_iocommunicator
3045 INTEGER , INTENT(IN) :: iocommunicator
3046 local_iocommunicator = iocommunicator
3048 END SUBROUTINE wrf_set_dm_iocommunicator
3050 SUBROUTINE wrf_get_dm_ntasks_x ( retval )
3051 USE module_dm , ONLY : ntasks_x
3053 INTEGER , INTENT(OUT) :: retval
3056 END SUBROUTINE wrf_get_dm_ntasks_x
3058 SUBROUTINE wrf_get_dm_ntasks_y ( retval )
3059 USE module_dm , ONLY : ntasks_y
3061 INTEGER , INTENT(OUT) :: retval
3064 END SUBROUTINE wrf_get_dm_ntasks_y
3067 SUBROUTINE wrf_set_dm_quilt_comm ( communicator )
3068 USE module_dm , ONLY : local_quilt_comm
3070 INTEGER , INTENT(IN) :: communicator
3071 local_quilt_comm = communicator
3073 END SUBROUTINE wrf_set_dm_quilt_comm
3075 SUBROUTINE wrf_get_dm_quilt_comm ( communicator )
3076 USE module_dm , ONLY : local_quilt_comm
3078 INTEGER , INTENT(OUT) :: communicator
3079 communicator = local_quilt_comm
3081 END SUBROUTINE wrf_get_dm_quilt_comm
3084 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
3086 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
3087 DS1,DE1,DS2,DE2,DS3,DE3,&
3088 MS1,ME1,MS2,ME2,MS3,ME3,&
3089 PS1,PE1,PS2,PE2,PS3,PE3 )
3091 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3092 MS1,ME1,MS2,ME2,MS3,ME3,&
3093 PS1,PE1,PS2,PE2,PS3,PE3
3094 CHARACTER *(*) stagger,ordering
3099 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
3100 DS1,DE1,DS2,DE2,DS3,DE3,&
3101 MS1,ME1,MS2,ME2,MS3,ME3,&
3102 PS1,PE1,PS2,PE2,PS3,PE3 )
3105 END SUBROUTINE wrf_patch_to_global_real
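! Hedged usage sketch (illustrative; write_68 earlier in this file is a real
! call site): every task passes its patch of the field, and only the monitor's
! domain-sized globbuf is filled:
!   REAL :: globbuf( ids:ide, kds:kde, jds:jde )   ! meaningful on the monitor only
!   CALL wrf_patch_to_global_real( v, globbuf, grid%domdesc, stag, 'xzy',  &
!                                  ids, ide, kds, kde, jds, jde,           &
!                                  ims, ime, kms, kme, jms, jme,           &
!                                  ips, ipe, kps, kpe, jps, jpe )
!   IF ( wrf_dm_on_monitor() ) THEN
!     ! ... write or post-process globbuf ...
!   END IF
! where stag names the staggered dimensions of v and 'xzy' matches its storage order.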
3107 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
3108 DS1,DE1,DS2,DE2,DS3,DE3,&
3109 MS1,ME1,MS2,ME2,MS3,ME3,&
3110 PS1,PE1,PS2,PE2,PS3,PE3 )
3112 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3113 MS1,ME1,MS2,ME2,MS3,ME3,&
3114 PS1,PE1,PS2,PE2,PS3,PE3
3115 CHARACTER *(*) stagger,ordering
3117 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3118 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3119 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv
3120 ! if we were not indexing the globbuf and Field arrays it would not even matter
3124 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
3125 DS1,DE1,DS2,DE2,DS3,DE3,&
3126 MS1,ME1,MS2,ME2,MS3,ME3,&
3127 PS1,PE1,PS2,PE2,PS3,PE3 )
3130 END SUBROUTINE wrf_patch_to_global_double
3133 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
3134 DS1,DE1,DS2,DE2,DS3,DE3,&
3135 MS1,ME1,MS2,ME2,MS3,ME3,&
3136 PS1,PE1,PS2,PE2,PS3,PE3 )
3138 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3139 MS1,ME1,MS2,ME2,MS3,ME3,&
3140 PS1,PE1,PS2,PE2,PS3,PE3
3141 CHARACTER *(*) stagger,ordering
3146 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
3147 DS1,DE1,DS2,DE2,DS3,DE3,&
3148 MS1,ME1,MS2,ME2,MS3,ME3,&
3149 PS1,PE1,PS2,PE2,PS3,PE3 )
3152 END SUBROUTINE wrf_patch_to_global_integer
3155 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
3156 DS1,DE1,DS2,DE2,DS3,DE3,&
3157 MS1,ME1,MS2,ME2,MS3,ME3,&
3158 PS1,PE1,PS2,PE2,PS3,PE3 )
3160 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3161 MS1,ME1,MS2,ME2,MS3,ME3,&
3162 PS1,PE1,PS2,PE2,PS3,PE3
3163 CHARACTER *(*) stagger,ordering
3168 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
3169 DS1,DE1,DS2,DE2,DS3,DE3,&
3170 MS1,ME1,MS2,ME2,MS3,ME3,&
3171 PS1,PE1,PS2,PE2,PS3,PE3 )
3174 END SUBROUTINE wrf_patch_to_global_logical
3177 # define FRSTELEM (1)
3182 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
3183 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3184 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3185 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
3186 USE module_driver_constants
3188 USE module_wrf_error, ONLY : wrf_at_debug_level
3189 USE module_dm, ONLY : local_communicator, ntasks
3192 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3193 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3194 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
3195 CHARACTER *(*) stagger,ordering
3196 INTEGER domdesc,typesize,ierr
3200 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3201 MS1,ME1,MS2,ME2,MS3,ME3,&
3202 PS1,PE1,PS2,PE2,PS3,PE3
3203 INTEGER ids,ide,jds,jde,kds,kde,&
3204 ims,ime,jms,jme,kms,kme,&
3205 ips,ipe,jps,jpe,kps,kpe
3206 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
3208 INTEGER i, j, k, ndim
3209 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
3210 ! allocated further down, after the D indices are potentially recalculated for staggering
3211 REAL, ALLOCATABLE :: tmpbuf( : )
3212 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
3214 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
3215 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
3216 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
3218 SELECT CASE ( TRIM(ordering) )
3222 ndim = 3 ! where appropriate
3225 SELECT CASE ( TRIM(ordering) )
3227 ! the non-staggered variables come in at one-less than
3228 ! domain dimensions, but code wants full domain spec, so
3229 ! adjust if not staggered
3230 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3231 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
3232 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3234 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3235 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
3236 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3238 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3239 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3240 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
3242 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3243 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3244 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
3248 ! moved to here to be after the potential recalculations of D dims
3249 IF ( wrf_dm_on_monitor() ) THEN
3250 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
3252 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
3254 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
3256 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
3257 Patch(2,1) = ps2 ; Patch(2,2) = pe2
3258 Patch(3,1) = ps3 ; Patch(3,2) = pe3
3260 IF ( typesize .EQ. RWORDSIZE ) THEN
3261 CALL just_patch_r ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3262 PS1, PE1, PS2, PE2, PS3, PE3 , &
3263 MS1, ME1, MS2, ME2, MS3, ME3 )
3264 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3265 CALL just_patch_i ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3266 PS1, PE1, PS2, PE2, PS3, PE3 , &
3267 MS1, ME1, MS2, ME2, MS3, ME3 )
3268 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3269 CALL just_patch_d ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3270 PS1, PE1, PS2, PE2, PS3, PE3 , &
3271 MS1, ME1, MS2, ME2, MS3, ME3 )
3272 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3273 CALL just_patch_l ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3274 PS1, PE1, PS2, PE2, PS3, PE3 , &
3275 MS1, ME1, MS2, ME2, MS3, ME3 )
3278 ! defined in external/io_quilt
3279 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
3283 CALL collect_on_comm0 ( local_communicator , typesize , &
3284 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
3285 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
3287 ndim = len(TRIM(ordering))
3289 IF ( wrf_at_debug_level(500) ) THEN
3293 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
3295 IF ( typesize .EQ. RWORDSIZE ) THEN
3296 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
3297 DS1, DE1, DS2, DE2, DS3, DE3 , &
3299 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3300 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
3301 DS1, DE1, DS2, DE2, DS3, DE3 , &
3303 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3304 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
3305 DS1, DE1, DS2, DE2, DS3, DE3 , &
3307 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3308 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
3309 DS1, DE1, DS2, DE2, DS3, DE3 , &
3315 IF ( wrf_at_debug_level(500) ) THEN
3316 CALL end_timing('wrf_patch_to_global_generic')
3318 DEALLOCATE( tmpbuf )
3321 END SUBROUTINE wrf_patch_to_global_generic
3323 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
3324 PS1,PE1,PS2,PE2,PS3,PE3, &
3325 MS1,ME1,MS2,ME2,MS3,ME3 )
3327 INTEGER , INTENT(IN) :: noutbuf
3328 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3329 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3330 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3331 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
3333 INTEGER :: i,j,k,n , icurs
3338 outbuf( icurs ) = inbuf( i, j, k )
3344 END SUBROUTINE just_patch_i
3346 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
3347 PS1,PE1,PS2,PE2,PS3,PE3, &
3348 MS1,ME1,MS2,ME2,MS3,ME3 )
3350 INTEGER , INTENT(IN) :: noutbuf
3351 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3352 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3353 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3354 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3356 INTEGER :: i,j,k , icurs
3361 outbuf( icurs ) = inbuf( i, j, k )
3367 END SUBROUTINE just_patch_r
3369 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
3370 PS1,PE1,PS2,PE2,PS3,PE3, &
3371 MS1,ME1,MS2,ME2,MS3,ME3 )
3373 INTEGER , INTENT(IN) :: noutbuf
3374 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3375 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3376 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3377 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3379 INTEGER :: i,j,k,n , icurs
3384 outbuf( icurs ) = inbuf( i, j, k )
3390 END SUBROUTINE just_patch_d
3392 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
3393 PS1,PE1,PS2,PE2,PS3,PE3, &
3394 MS1,ME1,MS2,ME2,MS3,ME3 )
3396 INTEGER , INTENT(IN) :: noutbuf
3397 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3398 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3399 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3400 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3402 INTEGER :: i,j,k,n , icurs
3407 outbuf( icurs ) = inbuf( i, j, k )
3413 END SUBROUTINE just_patch_l
3416 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
3417 DS1,DE1,DS2,DE2,DS3,DE3, &
3419 USE module_dm, ONLY : ntasks
3421 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
3422 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3423 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3425 INTEGER :: i,j,k,n , icurs
3428 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3429 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3430 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3431 outbuf( i, j, k ) = inbuf( icurs )
3439 END SUBROUTINE patch_2_outbuf_r
3441 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
3442 DS1,DE1,DS2,DE2,DS3,DE3,&
3444 USE module_dm, ONLY : ntasks
3446 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
3447 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3448 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3450 INTEGER :: i,j,k,n , icurs
3453 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3454 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3455 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3456 outbuf( i, j, k ) = inbuf( icurs )
3463 END SUBROUTINE patch_2_outbuf_i
3465 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
3466 DS1,DE1,DS2,DE2,DS3,DE3,&
3468 USE module_dm, ONLY : ntasks
3470 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
3471 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3472 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3474 INTEGER :: i,j,k,n , icurs
3477 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3478 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3479 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3480 outbuf( i, j, k ) = inbuf( icurs )
3487 END SUBROUTINE patch_2_outbuf_d
3489 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
3490 DS1,DE1,DS2,DE2,DS3,DE3,&
3492 USE module_dm, ONLY : ntasks
3494 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
3495 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3496 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3498 INTEGER :: i,j,k,n , icurs
3501 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3502 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3503 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3504 outbuf( i, j, k ) = inbuf( icurs )
3511 END SUBROUTINE patch_2_outbuf_l
3513 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
3515 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
3516 DS1,DE1,DS2,DE2,DS3,DE3,&
3517 MS1,ME1,MS2,ME2,MS3,ME3,&
3518 PS1,PE1,PS2,PE2,PS3,PE3 )
3520 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3521 MS1,ME1,MS2,ME2,MS3,ME3,&
3522 PS1,PE1,PS2,PE2,PS3,PE3
3523 CHARACTER *(*) stagger,ordering
3528 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
3529 DS1,DE1,DS2,DE2,DS3,DE3,&
3530 MS1,ME1,MS2,ME2,MS3,ME3,&
3531 PS1,PE1,PS2,PE2,PS3,PE3 )
3533 END SUBROUTINE wrf_global_to_patch_real
3535 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
3536 DS1,DE1,DS2,DE2,DS3,DE3,&
3537 MS1,ME1,MS2,ME2,MS3,ME3,&
3538 PS1,PE1,PS2,PE2,PS3,PE3 )
3540 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3541 MS1,ME1,MS2,ME2,MS3,ME3,&
3542 PS1,PE1,PS2,PE2,PS3,PE3
3543 CHARACTER *(*) stagger,ordering
3545 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3546 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3547 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv
3548 ! if we were not indexing the globbuf and Field arrays it would not even matter
3552 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
3553 DS1,DE1,DS2,DE2,DS3,DE3,&
3554 MS1,ME1,MS2,ME2,MS3,ME3,&
3555 PS1,PE1,PS2,PE2,PS3,PE3 )
3557 END SUBROUTINE wrf_global_to_patch_double
3560 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
3561 DS1,DE1,DS2,DE2,DS3,DE3,&
3562 MS1,ME1,MS2,ME2,MS3,ME3,&
3563 PS1,PE1,PS2,PE2,PS3,PE3 )
3565 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3566 MS1,ME1,MS2,ME2,MS3,ME3,&
3567 PS1,PE1,PS2,PE2,PS3,PE3
3568 CHARACTER *(*) stagger,ordering
3573 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
3574 DS1,DE1,DS2,DE2,DS3,DE3,&
3575 MS1,ME1,MS2,ME2,MS3,ME3,&
3576 PS1,PE1,PS2,PE2,PS3,PE3 )
3578 END SUBROUTINE wrf_global_to_patch_integer
3580 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
3581 DS1,DE1,DS2,DE2,DS3,DE3,&
3582 MS1,ME1,MS2,ME2,MS3,ME3,&
3583 PS1,PE1,PS2,PE2,PS3,PE3 )
3585 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3586 MS1,ME1,MS2,ME2,MS3,ME3,&
3587 PS1,PE1,PS2,PE2,PS3,PE3
3588 CHARACTER *(*) stagger,ordering
3593 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
3594 DS1,DE1,DS2,DE2,DS3,DE3,&
3595 MS1,ME1,MS2,ME2,MS3,ME3,&
3596 PS1,PE1,PS2,PE2,PS3,PE3 )
3598 END SUBROUTINE wrf_global_to_patch_logical
3600 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
3601 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3602 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3603 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
3604 USE module_dm, ONLY : local_communicator, ntasks
3605 USE module_driver_constants
3607 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3608 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3609 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
3610 CHARACTER *(*) stagger,ordering
3611 INTEGER domdesc,typesize,ierr
3615 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3616 MS1,ME1,MS2,ME2,MS3,ME3,&
3617 PS1,PE1,PS2,PE2,PS3,PE3
3618 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
3620 INTEGER i,j,k,ord,ord2d,ndim
3621 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
3622 REAL, ALLOCATABLE :: tmpbuf( : )
3623 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
3625 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
3626 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
3627 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
3629 SELECT CASE ( TRIM(ordering) )
3633 ndim = 3 ! where appropriate
3636 SELECT CASE ( TRIM(ordering) )
3638 ! the non-staggered variables come in at one-less than
3639 ! domain dimensions, but code wants full domain spec, so
3640 ! adjust if not staggered
3641 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3642 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
3643 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3645 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3646 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
3647 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3649 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3650 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3651 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
3653 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3654 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3655 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
3659 ! moved to here to be after the potential recalculations of D dims
3660 IF ( wrf_dm_on_monitor() ) THEN
3661 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
3663 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
3665 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
3667 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
3668 Patch(2,1) = ps2 ; Patch(2,2) = pe2
3669 Patch(3,1) = ps3 ; Patch(3,2) = pe3
3671 ! defined in external/io_quilt
3672 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
3675 ndim = len(TRIM(ordering))
3677 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
3678 IF ( typesize .EQ. RWORDSIZE ) THEN
3679 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
3680 DS1, DE1, DS2, DE2, DS3, DE3 , &
3681 MS1, ME1, MS2, ME2, MS3, ME3 , &
3683 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3684 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
3685 DS1, DE1, DS2, DE2, DS3, DE3 , &
3687 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3688 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
3689 DS1, DE1, DS2, DE2, DS3, DE3 , &
3691 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3692 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
3693 DS1, DE1, DS2, DE2, DS3, DE3 , &
3698 CALL dist_on_comm0 ( local_communicator , typesize , &
3699 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
3700 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
3702 IF ( typesize .EQ. RWORDSIZE ) THEN
3703 CALL all_sub_r ( locbuf , buf , &
3704 PS1, PE1, PS2, PE2, PS3, PE3 , &
3705 MS1, ME1, MS2, ME2, MS3, ME3 )
3707 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3708 CALL all_sub_i ( locbuf , buf , &
3709 PS1, PE1, PS2, PE2, PS3, PE3 , &
3710 MS1, ME1, MS2, ME2, MS3, ME3 )
3711 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3712 CALL all_sub_d ( locbuf , buf , &
3713 PS1, PE1, PS2, PE2, PS3, PE3 , &
3714 MS1, ME1, MS2, ME2, MS3, ME3 )
3715 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3716 CALL all_sub_l ( locbuf , buf , &
3717 PS1, PE1, PS2, PE2, PS3, PE3 , &
3718 MS1, ME1, MS2, ME2, MS3, ME3 )
3722 DEALLOCATE ( tmpbuf )
3725 END SUBROUTINE wrf_global_to_patch_generic
3727 SUBROUTINE all_sub_i ( inbuf , outbuf, &
3728 PS1,PE1,PS2,PE2,PS3,PE3, &
3729 MS1,ME1,MS2,ME2,MS3,ME3 )
3731 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
3732 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3733 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3734 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3736 INTEGER :: i,j,k,n , icurs
3741 outbuf( i, j, k ) = inbuf ( icurs )
3747 END SUBROUTINE all_sub_i
3749 SUBROUTINE all_sub_r ( inbuf , outbuf, &
3750 PS1,PE1,PS2,PE2,PS3,PE3, &
3751 MS1,ME1,MS2,ME2,MS3,ME3 )
3753 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
3754 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3755 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3756 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3758 INTEGER :: i,j,k,n , icurs
3763 outbuf( i, j, k ) = inbuf ( icurs )
3770 END SUBROUTINE all_sub_r
3772 SUBROUTINE all_sub_d ( inbuf , outbuf, &
3773 PS1,PE1,PS2,PE2,PS3,PE3, &
3774 MS1,ME1,MS2,ME2,MS3,ME3 )
3776 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
3777 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3778 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3779 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3781 INTEGER :: i,j,k,n , icurs
3786 outbuf( i, j, k ) = inbuf ( icurs )
3792 END SUBROUTINE all_sub_d
3794 SUBROUTINE all_sub_l ( inbuf , outbuf, &
3795 PS1,PE1,PS2,PE2,PS3,PE3, &
3796 MS1,ME1,MS2,ME2,MS3,ME3 )
3798 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
3799 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3800 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3801 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3803 INTEGER :: i,j,k,n , icurs
3808 outbuf( i, j, k ) = inbuf ( icurs )
3814 END SUBROUTINE all_sub_l
3816 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
3817 DS1,DE1,DS2,DE2,DS3,DE3, &
3818 MS1, ME1, MS2, ME2, MS3, ME3 , &
3820 USE module_dm, ONLY : ntasks
3822 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3823 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3824 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3825 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3827 INTEGER :: i,j,k,n , icurs
3831 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3832 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3833 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3834 outbuf( icurs ) = inbuf( i,j,k )
3841 END SUBROUTINE outbuf_2_patch_r
3843 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
3844 DS1,DE1,DS2,DE2,DS3,DE3,&
3846 USE module_dm, ONLY : ntasks
3848 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
3849 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3850 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3852 INTEGER :: i,j,k,n , icurs
3855 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3856 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3857 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3858 outbuf( icurs ) = inbuf( i,j,k )
3865 END SUBROUTINE outbuf_2_patch_i
3867 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
3868 DS1,DE1,DS2,DE2,DS3,DE3,&
3870 USE module_dm, ONLY : ntasks
3872 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
3873 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3874 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3876 INTEGER :: i,j,k,n , icurs
3879 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3880 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3881 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3882 outbuf( icurs ) = inbuf( i,j,k )
3889 END SUBROUTINE outbuf_2_patch_d
3891 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
3892 DS1,DE1,DS2,DE2,DS3,DE3,&
3894 USE module_dm, ONLY : ntasks
3896 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3897 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3898 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3900 INTEGER :: i,j,k,n , icurs
3903 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3904 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3905 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3906 outbuf( icurs ) = inbuf( i,j,k )
3913 END SUBROUTINE outbuf_2_patch_l
3916 SUBROUTINE wrf_dm_nestexchange_init
3917 CALL rsl_lite_nesting_reset
3918 END SUBROUTINE wrf_dm_nestexchange_init
3921 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
3922 my_count , & ! sendcount
3923 globbuf, glob_ofst , & ! recvbuf
3924 counts , & ! recvcounts
3927 communicator , & ! communicator
3929 USE module_dm, ONLY : getrealmpitype
3931 INTEGER field_ofst, glob_ofst
3932 INTEGER my_count, communicator, root, ierr
3933 INTEGER , DIMENSION(*) :: counts, displs
3934 REAL, DIMENSION(*) :: Field, globbuf
3938 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3939 my_count , & ! sendcount
3940 getrealmpitype() , & ! sendtype
3941 globbuf( glob_ofst ) , & ! recvbuf
3942 counts , & ! recvcounts
3944 getrealmpitype() , & ! recvtype
3946 communicator , & ! communicator
3950 END SUBROUTINE wrf_gatherv_real
3952 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
3953 my_count , & ! sendcount
3954 globbuf, glob_ofst , & ! recvbuf
3955 counts , & ! recvcounts
3958 communicator , & ! communicator
3962 INTEGER field_ofst, glob_ofst
3963 INTEGER my_count, communicator, root, ierr
3964 INTEGER , DIMENSION(*) :: counts, displs
3965 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3966 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3967 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv
3968 ! if we were not indexing the globbuf and Field arrays it would not even matter
3969 REAL, DIMENSION(*) :: Field, globbuf
3973 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3974 my_count , & ! sendcount
3975 MPI_DOUBLE_PRECISION , & ! sendtype
3976 globbuf( glob_ofst ) , & ! recvbuf
3977 counts , & ! recvcounts
3979 MPI_DOUBLE_PRECISION , & ! recvtype
3981 communicator , & ! communicator
3985 END SUBROUTINE wrf_gatherv_double
   SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
                                my_count ,           &    ! sendcount
                                globbuf, glob_ofst , &    ! recvbuf
                                counts ,             &    ! recvcounts
                                displs ,             &    ! displs
                                root ,               &    ! root
                                communicator ,       &    ! communicator
                                ierr )
      IMPLICIT NONE
      INTEGER field_ofst, glob_ofst
      INTEGER my_count, communicator, root, ierr
      INTEGER , DIMENSION(*) :: counts, displs
      INTEGER, DIMENSION(*) :: Field, globbuf
      INCLUDE 'mpif.h'
      CALL mpi_gatherv( Field( field_ofst ),   &    ! sendbuf
                        my_count ,             &    ! sendcount
                        MPI_INTEGER ,          &    ! sendtype
                        globbuf( glob_ofst ) , &    ! recvbuf
                        counts ,               &    ! recvcounts
                        displs ,               &    ! displs
                        MPI_INTEGER ,          &    ! recvtype
                        root ,                 &    ! root
                        communicator ,         &    ! communicator
                        ierr )
   END SUBROUTINE wrf_gatherv_integer
   SUBROUTINE wrf_scatterv_real (              &
                                globbuf, glob_ofst , &    ! sendbuf (significant at root)
                                counts ,             &    ! sendcounts
                                Field, field_ofst,   &    ! recvbuf
                                my_count ,           &    ! recvcount
                                displs ,             &    ! displs
                                root ,               &    ! root
                                communicator ,       &    ! communicator
                                ierr )
      USE module_dm, ONLY : getrealmpitype
      IMPLICIT NONE
      INTEGER field_ofst, glob_ofst
      INTEGER my_count, communicator, root, ierr
      INTEGER , DIMENSION(*) :: counts, displs
      REAL, DIMENSION(*) :: Field, globbuf
      INCLUDE 'mpif.h'
      CALL mpi_scatterv(                       &
                        globbuf( glob_ofst ) , &    ! sendbuf
                        counts ,               &    ! sendcounts
                        displs ,               &    ! displs
                        getrealmpitype() ,     &    ! sendtype
                        Field( field_ofst ),   &    ! recvbuf
                        my_count ,             &    ! recvcount
                        getrealmpitype() ,     &    ! recvtype
                        root ,                 &    ! root
                        communicator ,         &    ! communicator
                        ierr )
   END SUBROUTINE wrf_scatterv_real
   SUBROUTINE wrf_scatterv_double (            &
                                globbuf, glob_ofst , &    ! sendbuf (significant at root)
                                counts ,             &    ! sendcounts
                                Field, field_ofst,   &    ! recvbuf
                                my_count ,           &    ! recvcount
                                displs ,             &    ! displs
                                root ,               &    ! root
                                communicator ,       &    ! communicator
                                ierr )
      IMPLICIT NONE
      INTEGER field_ofst, glob_ofst
      INTEGER my_count, communicator, root, ierr
      INTEGER , DIMENSION(*) :: counts, displs
! The next declaration is REAL, not DOUBLE PRECISION, because it is autopromoted to
! double precision by the compiler when WRF is built with 8-byte reals.  The only
! reason for having this separate routine is to pass the correct MPI type to
! mpi_scatterv; if we were not indexing the globbuf and Field arrays it would not
! even matter.
      REAL, DIMENSION(*) :: Field, globbuf
      INCLUDE 'mpif.h'
      CALL mpi_scatterv(                       &
                        globbuf( glob_ofst ) , &    ! sendbuf
                        counts ,               &    ! sendcounts
                        displs ,               &    ! displs
                        MPI_DOUBLE_PRECISION , &    ! sendtype
                        Field( field_ofst ),   &    ! recvbuf
                        my_count ,             &    ! recvcount
                        MPI_DOUBLE_PRECISION , &    ! recvtype
                        root ,                 &    ! root
                        communicator ,         &    ! communicator
                        ierr )
   END SUBROUTINE wrf_scatterv_double
   SUBROUTINE wrf_scatterv_integer (           &
                                globbuf, glob_ofst , &    ! sendbuf (significant at root)
                                counts ,             &    ! sendcounts
                                Field, field_ofst,   &    ! recvbuf
                                my_count ,           &    ! recvcount
                                displs ,             &    ! displs
                                root ,               &    ! root
                                communicator ,       &    ! communicator
                                ierr )
      IMPLICIT NONE
      INTEGER field_ofst, glob_ofst
      INTEGER my_count, communicator, root, ierr
      INTEGER , DIMENSION(*) :: counts, displs
      INTEGER, DIMENSION(*) :: Field, globbuf
      INCLUDE 'mpif.h'
!     (a commented usage sketch follows this routine)
      CALL mpi_scatterv(                       &
                        globbuf( glob_ofst ) , &    ! sendbuf
                        counts ,               &    ! sendcounts
                        displs ,               &    ! displs
                        MPI_INTEGER ,          &    ! sendtype
                        Field( field_ofst ),   &    ! recvbuf
                        my_count ,             &    ! recvcount
                        MPI_INTEGER ,          &    ! recvtype
                        root ,                 &    ! root
                        communicator ,         &    ! communicator
                        ierr )
   END SUBROUTINE wrf_scatterv_integer
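! Illustration (not part of WRF): a minimal, hypothetical sketch of driving the
! scatterv wrappers above.  The root rank owns a 1-D global buffer; counts(i) and
! displs(i) give, in elements, the length and offset of the slice sent to rank i-1,
! and every rank receives my_count elements into its own buffer.  The routine name,
! argument names and the way counts are assembled here are assumptions for this
! example only; WRF's real callers build them from patch dimensions.
      SUBROUTINE example_scatterv_integer_usage( glob, nglob, piece, npiece, root, comm )
         IMPLICIT NONE
         INCLUDE 'mpif.h'
         INTEGER, INTENT(IN)  :: nglob, npiece, root, comm
         INTEGER, INTENT(IN)  :: glob(nglob)      ! significant on the root only
         INTEGER, INTENT(OUT) :: piece(npiece)
         INTEGER :: nproc, ierr, i
         INTEGER, ALLOCATABLE :: counts(:), displs(:)
         CALL mpi_comm_size( comm, nproc, ierr )
         ALLOCATE( counts(nproc), displs(nproc) )
         counts = 0
         displs = 0
         ! the root learns how many elements each rank expects; counts and displs
         ! are only significant there
         CALL mpi_gather( npiece, 1, MPI_INTEGER, counts, 1, MPI_INTEGER, root, comm, ierr )
         DO i = 2, nproc
            displs(i) = displs(i-1) + counts(i-1)
         ENDDO
         ! offsets of 1 mean "start at the first element" of glob and piece
         CALL wrf_scatterv_integer( glob, 1, counts, piece, 1, npiece, displs, root, comm, ierr )
         DEALLOCATE( counts, displs )
      END SUBROUTINE example_scatterv_integer_usage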
! end new stuff 20070124
   SUBROUTINE wrf_dm_gatherv ( v, elemsize , km_s, km_e, wordsz )
      IMPLICIT NONE
      INTEGER elemsize, km_s, km_e, wordsz
      REAL v(*)
      IF ( wordsz .EQ. DWORDSIZE ) THEN
         CALL wrf_dm_gatherv_double(v, elemsize , km_s, km_e)
      ELSE
         CALL wrf_dm_gatherv_single(v, elemsize , km_s, km_e)
      ENDIF
   END SUBROUTINE wrf_dm_gatherv
   SUBROUTINE wrf_dm_gatherv_double ( v, elemsize , km_s, km_e )
      IMPLICIT NONE
      INTEGER elemsize, km_s, km_e
      REAL*8 v(0:*)
# ifndef USE_MPI_IN_PLACE
      REAL*8 v_local((km_e-km_s+1)*elemsize)
# endif
      INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
      INTEGER send_type, myproc, nproc, local_comm, ierr, i
      INCLUDE 'mpif.h'
      send_type = MPI_DOUBLE_PRECISION
      CALL wrf_get_dm_communicator ( local_comm )
      CALL wrf_get_nproc( nproc )
      CALL wrf_get_myproc( myproc )
      ALLOCATE( recvcounts(nproc), displs(nproc) )
      i = (km_e-km_s+1)*elemsize           ! number of elements this task contributes
      CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr)
      i = km_s*elemsize                    ! element offset of this task's slice in v
      CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr)
# ifdef USE_MPI_IN_PLACE
      CALL mpi_allgatherv( MPI_IN_PLACE,            &
# else
      DO i = 1,elemsize*(km_e-km_s+1)
        v_local(i) = v(i+elemsize*km_s-1)
      ENDDO
      CALL mpi_allgatherv( v_local,                 &
# endif
                           (km_e-km_s+1)*elemsize,  &
                           send_type,               &
                           v,                       &
                           recvcounts,              &
                           displs,                  &
                           send_type,               &
                           local_comm,              &
                           ierr )
      DEALLOCATE(recvcounts)
      DEALLOCATE(displs)
   END SUBROUTINE wrf_dm_gatherv_double
   SUBROUTINE wrf_dm_gatherv_single ( v, elemsize , km_s, km_e )
      IMPLICIT NONE
      INTEGER elemsize, km_s, km_e
      REAL*4 v(0:*)
# ifndef USE_MPI_IN_PLACE
      REAL*4 v_local((km_e-km_s+1)*elemsize)
# endif
      INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
      INTEGER send_type, myproc, nproc, local_comm, ierr, i
      INCLUDE 'mpif.h'
      send_type = MPI_REAL
      CALL wrf_get_dm_communicator ( local_comm )
      CALL wrf_get_nproc( nproc )
      CALL wrf_get_myproc( myproc )
      ALLOCATE( recvcounts(nproc), displs(nproc) )
      i = (km_e-km_s+1)*elemsize           ! number of elements this task contributes
      CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr)
      i = km_s*elemsize                    ! element offset of this task's slice in v
      CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr)
!     (a worked recvcounts/displs example follows this routine)
# ifdef USE_MPI_IN_PLACE
      CALL mpi_allgatherv( MPI_IN_PLACE,            &
# else
      DO i = 1,elemsize*(km_e-km_s+1)
        v_local(i) = v(i+elemsize*km_s-1)
      ENDDO
      CALL mpi_allgatherv( v_local,                 &
# endif
                           (km_e-km_s+1)*elemsize,  &
                           send_type,               &
                           v,                       &
                           recvcounts,              &
                           displs,                  &
                           send_type,               &
                           local_comm,              &
                           ierr )
      DEALLOCATE(recvcounts)
      DEALLOCATE(displs)
   END SUBROUTINE wrf_dm_gatherv_single
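! Worked example (illustration only, with made-up numbers) for the recvcounts/displs
! exchange used by wrf_dm_gatherv_double and wrf_dm_gatherv_single above: with
! nproc = 3, elemsize = 2 and per-task ranges km_s:km_e of 0:3, 4:6 and 7:9, each task
! contributes (km_e-km_s+1)*elemsize elements starting at element offset km_s*elemsize,
! so after the two mpi_allgather calls every rank holds
!     recvcounts = (/ 8, 6, 6 /)     ! (3-0+1)*2, (6-4+1)*2, (9-7+1)*2
!     displs     = (/ 0, 8, 14 /)    ! 0*2, 4*2, 7*2
! and mpi_allgatherv assembles the full v(0:19) on every rank, either in place
! (USE_MPI_IN_PLACE) or via the v_local copy of the rank's own slice.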
   SUBROUTINE wrf_dm_decomp1d( nt, km_s, km_e )
      IMPLICIT NONE
      INTEGER, INTENT(IN)  :: nt
      INTEGER, INTENT(OUT) :: km_s, km_e
      INTEGER nn, nnp, na, nb
      INTEGER myproc, nproc
      CALL wrf_get_myproc(myproc)
      CALL wrf_get_nproc(nproc)
      nn = nt / nproc                    ! min number done by this task
      nnp = nn
      IF ( myproc .LT. MOD( nt, nproc ) ) nnp = nnp + 1   ! distribute remainder
      na = MIN( myproc, MOD(nt,nproc) )  ! number of blocks with a remainder that precede this one
      nb = MAX( 0, myproc - na )         ! number of blocks without a remainder that precede this one
      km_s = na * ( nn+1 ) + nb * nn     ! starting iteration for this task (0-based)
      km_e = km_s + nnp - 1              ! ending iteration for this task
!     (a worked example follows this routine)
   END SUBROUTINE wrf_dm_decomp1d
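! Worked example (illustration only): nt = 10 iterations over nproc = 4 tasks gives
! nn = 2 and mod(nt,nproc) = 2, so tasks 0 and 1 each take one extra iteration:
!     myproc :   0    1    2    3
!     nnp    :   3    3    2    2
!     km_s   :   0    3    6    8
!     km_e   :   2    5    7    9
! i.e. contiguous 0-based ranges 0-2, 3-5, 6-7 and 8-9 that exactly cover 0..nt-1.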
   SUBROUTINE wrf_dm_define_comms ( grid )
      USE module_domain, ONLY : domain
      IMPLICIT NONE
      TYPE(domain) , INTENT (INOUT) :: grid
      ! intentionally a no-op
      RETURN
   END SUBROUTINE wrf_dm_define_comms
   SUBROUTINE tfp_message( fname, lno )
      IMPLICIT NONE
      CHARACTER*(*) fname
      INTEGER lno
      CHARACTER*1024 mess
      WRITE(mess,*)'tfp_message: ',trim(fname),lno
      CALL wrf_message(mess)
# ifdef ALLOW_OVERDECOMP
      CALL task_for_point_message  ! defined in RSL_LITE/task_for_point.c
# else
      CALL wrf_error_fatal(mess)
# endif
   END SUBROUTINE tfp_message
   SUBROUTINE set_dm_debug
      USE module_dm, ONLY : dm_debug_flag
      IMPLICIT NONE
      dm_debug_flag = .TRUE.
   END SUBROUTINE set_dm_debug
   SUBROUTINE reset_dm_debug
      USE module_dm, ONLY : dm_debug_flag
      IMPLICIT NONE
      dm_debug_flag = .FALSE.
   END SUBROUTINE reset_dm_debug
   SUBROUTINE get_dm_debug ( arg )
      USE module_dm, ONLY : dm_debug_flag
      IMPLICIT NONE
      LOGICAL arg
      arg = dm_debug_flag
   END SUBROUTINE get_dm_debug