1 #define NEST_FULL_INFLUENCE(A,B) A=B
6 USE module_driver_constants
9 USE module_cpl, ONLY : coupler_on, cpl_init
16 INTEGER, PARAMETER :: MPI_UNDEFINED = -1
19 INTEGER, PARAMETER :: max_halo_width = 6 ! 5
21 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
22 INTEGER :: lats_to_mic, minx, miny
24 INTEGER :: communicator_stack_cursor = 0
25 INTEGER :: current_id = 1
26 INTEGER, DIMENSION(max_domains) :: ntasks_stack, ntasks_y_stack &
27 , ntasks_x_stack, mytask_stack &
28 , mytask_x_stack, mytask_y_stack &
30 INTEGER, DIMENSION(max_domains) :: ntasks_store, ntasks_y_store &
31 , ntasks_x_store, mytask_store &
32 , mytask_x_store, mytask_y_store
33 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
35 INTEGER, DIMENSION(max_domains) :: local_communicator_stack, local_communicator_periodic_stack &
36 ,local_iocommunicator_stack &
37 ,local_communicator_x_stack, local_communicator_y_stack
38 INTEGER, DIMENSION(max_domains) :: local_communicator_store, local_communicator_periodic_store &
39 ,local_iocommunicator_store &
40 ,local_communicator_x_store, local_communicator_y_store
42 INTEGER :: mpi_comm_allcompute = MPI_UNDEFINED
43 INTEGER :: local_communicator = MPI_UNDEFINED
44 INTEGER :: local_communicator_periodic = MPI_UNDEFINED
45 INTEGER :: local_iocommunicator = MPI_UNDEFINED
46 INTEGER :: local_communicator_x = MPI_UNDEFINED
47 INTEGER :: local_communicator_y = MPI_UNDEFINED ! subcommunicators for rows and cols of mesh
48 INTEGER :: local_quilt_comm = MPI_UNDEFINED ! added 20151212 jm
49 LOGICAL :: dm_debug_flag = .FALSE.
50 ! for parallel nesting, 201408, jm
51 INTEGER intercomm_to_mom( max_domains ), intercomm_to_kid( max_nests, max_domains )
52 INTEGER mpi_comm_to_mom( max_domains ), mpi_comm_to_kid( max_nests, max_domains )
53 INTEGER which_kid(max_domains), nkids(max_domains)
54 INTEGER nest_task_offsets(max_domains)
55 LOGICAL intercomm_active( max_domains )
56 LOGICAL domain_active_this_task( max_domains )
57 ! see comments below (search for "Communicator definition")
58 INTEGER tasks_per_split
59 INTEGER comm_start(max_domains) ! set in dm_task_split
60 ! INTEGER comm_pes (max_domains) ! either this may be set in dm_task_split
61 ! INTEGER comm_pes_x(max_domains) ! or these may be set in dm_task_split
62 ! INTEGER comm_pes_y(max_domains) ! " " may be set in dm_task_split
63 ! INTEGER comm_domain(max_domains) ! set in dm_task_split
64 INTEGER nest_pes_x(max_domains) ! set in dm_task_split
65 INTEGER nest_pes_y(max_domains) ! set in dm_task_split
66 INTEGER comms_i_am_in (max_domains) ! list of local communicators this task is a member of
67 INTEGER loc_comm(max_domains)
69 INTEGER nio_tasks_per_group(max_domains), nio_groups, num_io_tasks
70 NAMELIST /dm_task_split/ tasks_per_split, comm_start, nest_pes_x, nest_pes_y
71 NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups, poll_servers
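! A hypothetical example of a namelist_quilt stanza in namelist.input (values are
! illustrative only, not defaults taken from this file): two quilt I/O tasks per
! group, one group, no polling.
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!    poll_servers        = .false.,
!   /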
75 integer :: c_ipsy, c_ipey, c_kpsy, c_kpey, c_kpsx, c_kpex, c_ipex, c_ipsx, c_jpex, c_jpsx, c_jpey, c_jpsy
76 integer :: c_imsy, c_imey, c_kmsy, c_kmey, c_kmsx, c_kmex, c_imex, c_imsx, c_jmex, c_jmsx, c_jmey, c_jmsy
80 INTERFACE wrf_dm_maxval
81 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
82 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
84 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
88 INTERFACE wrf_dm_minval ! gopal's doing
89 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
90 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
92 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
98 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
100 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
105 IF ( MOD( P, M ) .EQ. 0 ) THEN
107 IF ( ABS(M-N) .LT. MINI &
108 .AND. M .GE. PROCMIN_M &
109 .AND. N .GE. PROCMIN_N &
117 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
118 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
119 CALL wrf_message ( TRIM ( wrf_err_message ) )
120 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
121 CALL wrf_message ( TRIM ( wrf_err_message ) )
122 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
123 CALL wrf_message ( TRIM ( wrf_err_message ) )
124 WRITE( wrf_err_message , * )' P ', P
125 CALL wrf_message ( TRIM ( wrf_err_message ) )
126 WRITE( wrf_err_message , * )' MINM ', MINM
127 CALL wrf_message ( TRIM ( wrf_err_message ) )
128 WRITE( wrf_err_message , * )' MINN ', MINN
129 CALL wrf_message ( TRIM ( wrf_err_message ) )
130 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
133 END SUBROUTINE MPASPECT
135 SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
137 INTEGER, INTENT(IN) :: ntasks
138 INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
140 CALL nl_get_nproc_x ( 1, ntasks_x )
141 CALL nl_get_nproc_y ( 1, ntasks_y )
142 CALL nl_get_lats_to_mic ( 1, lats_to_mic )
143 ! check whether the user has specified the decomposition in the namelist
144 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
145 ! if only ntasks_x is specified then make it 1-d decomp in i
146 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
147 ntasks_y = ntasks / ntasks_x
148 ! if only ntasks_y is specified then make it 1-d decomp in j
149 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
150 ntasks_x = ntasks / ntasks_y
152 ! make sure user knows what they're doing
153 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
154 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
155 CALL wrf_error_fatal ( wrf_err_message )
157 ELSE IF ( lats_to_mic .GT. 0 ) THEN
158 ntasks_x = ntasks / 2
160 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
161 WRITE( wrf_err_message , * )&
162 'WRF_DM_INITIALIZE (lats_to_mic > 0) nproc_x (',ntasks_x,')* nproc_y (',ntasks_y,&
163 ') in namelist ne ',ntasks
164 CALL wrf_error_fatal ( wrf_err_message )
167 ! When neither is specified, work out mesh with MPASPECT
168 ! Pass nproc_ln and nproc_nt so that the number of procs in the
169 ! i-dim (nproc_ln) is equal to or less than the number in the j-dim (nproc_nt).
170 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
172 ntasks_store(1) = ntasks
173 ntasks_x_store(1) = ntasks_x
174 ntasks_y_store(1) = ntasks_y
175 END SUBROUTINE compute_mesh
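! Illustration only, not part of WRF: a minimal standalone sketch of the near-square
! factorization that MPASPECT performs for compute_mesh when nproc_x/nproc_y are not
! given. The program and variable names (demo_mpaspect, best_m, best_n) are hypothetical.
!
!   PROGRAM demo_mpaspect
!     IMPLICIT NONE
!     INTEGER :: p, m, best_m, best_n, mini
!     p = 24                                     ! total number of MPI tasks
!     mini = 2*p ; best_m = 1 ; best_n = p
!     DO m = 1, p
!       IF ( MOD( p, m ) .EQ. 0 ) THEN           ! m divides p evenly
!         IF ( ABS( m - p/m ) .LT. mini ) THEN   ! closest-to-square pair so far
!           mini = ABS( m - p/m )
!           best_m = m ; best_n = p/m
!         END IF
!       END IF
!     END DO
!     PRINT *, 'mesh ', best_m, ' x ', best_n    ! prints 4 x 6 for p = 24
!   END PROGRAM demo_mpaspect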
177 SUBROUTINE wrf_dm_initialize
180 INTEGER :: local_comm_per, local_comm_x, local_comm_y, local_comm2, new_local_comm, group, newgroup, p, p1, ierr,itmp
181 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
183 INTEGER, DIMENSION(2) :: dims, coords
184 LOGICAL, DIMENSION(2) :: isperiodic
185 LOGICAL :: reorder_mesh
187 CALL instate_communicators_for_domain(1)
189 CALL wrf_get_dm_communicator ( new_local_comm )
190 dims(1) = nest_pes_y(1) ! rows
191 dims(2) = nest_pes_x(1) ! columns
192 isperiodic(1) = .true.
193 isperiodic(2) = .true.
194 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_comm_per, ierr )
195 local_communicator_periodic_store(1) = local_comm_per
196 ! set all the domains' periodic communicators to this one <- kludge, 20151223, splitting domains won't work for periodic BCs
197 local_communicator_periodic_store = local_comm_per
198 local_communicator_periodic = local_comm_per
209 intercomm_active = .TRUE.
210 domain_active_this_task = .TRUE.
212 CALL nl_set_nproc_x ( 1, ntasks_x )
213 CALL nl_set_nproc_y ( 1, ntasks_y )
214 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
215 CALL wrf_message( wrf_err_message )
216 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
217 CALL MPI_BARRIER( local_communicator, ierr )
220 END SUBROUTINE wrf_dm_initialize
222 SUBROUTINE get_dm_max_halo_width( id, width )
224 INTEGER, INTENT(IN) :: id
225 INTEGER, INTENT(OUT) :: width
226 IF ( id .EQ. 1 ) THEN ! this is coarse domain
227 width = max_halo_width
229 width = max_halo_width + 3
232 END SUBROUTINE get_dm_max_halo_width
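! e.g. with max_halo_width = 6 (set above), the coarse domain (id 1) gets width = 6
! while any nest gets width = 6 + 3 = 9.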
234 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
235 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
236 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
237 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
238 sp1x , ep1x , sm1x , em1x , &
239 sp2x , ep2x , sm2x , em2x , &
240 sp3x , ep3x , sm3x , em3x , &
241 sp1y , ep1y , sm1y , em1y , &
242 sp2y , ep2y , sm2y , em2y , &
243 sp3y , ep3y , sm3y , em3y , &
246 #if ( ( defined(SGIALTIX) || defined(FUJITSU_FX10) || defined(KEEP_INT_AROUND) ) && (! defined(MOVE_NESTS) ) )
247 USE module_domain, ONLY : domain, head_grid, find_grid_by_id, alloc_space_field
249 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
253 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
254 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
255 sm1 , em1 , sm2 , em2 , sm3 , em3
256 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
257 sm1x , em1x , sm2x , em2x , sm3x , em3x
258 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
259 sm1y , em1y , sm2y , em2y , sm3y , em3y
260 INTEGER, INTENT(IN) :: id, parent_id
261 TYPE(domain),POINTER :: parent
264 INTEGER :: ids, ide, jds, jde, kds, kde
265 INTEGER :: ims, ime, jms, jme, kms, kme
266 INTEGER :: ips, ipe, jps, jpe, kps, kpe
267 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
268 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
269 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
270 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
272 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
273 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
274 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
275 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
276 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
277 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
278 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
280 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
281 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
282 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
284 INTEGER :: idim , jdim , kdim , rem , a, b
285 INTEGER :: i, j, ni, nj, Px, Py, P
287 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
289 INTEGER :: idim_cd, jdim_cd, ierr
293 INTEGER :: e_we, e_sn
296 TYPE(domain), POINTER :: intermediate_grid
297 TYPE(domain), POINTER :: nest_grid
298 CHARACTER*256 :: mess
300 INTEGER parent_max_halo_width
301 INTEGER thisdomain_max_halo_width
305 CALL nl_get_lats_to_mic( 1, lats_to_mic )
306 IF ( lats_to_mic .GT. 0 ) THEN
307 minx = -99 ! flag telling task_for_point to do a split decomposition over MIC and host
308 miny = lats_to_mic ! number of latitudes that should be assigned to MIC
316 SELECT CASE ( model_data_order )
317 ! need to finish other cases
318 CASE ( DATA_ORDER_ZXY )
319 ids = sd2 ; ide = ed2
320 jds = sd3 ; jde = ed3
321 kds = sd1 ; kde = ed1
322 CASE ( DATA_ORDER_XYZ )
323 ids = sd1 ; ide = ed1
324 jds = sd2 ; jde = ed2
325 kds = sd3 ; kde = ed3
326 CASE ( DATA_ORDER_XZY )
327 ids = sd1 ; ide = ed1
328 jds = sd3 ; jde = ed3
329 kds = sd2 ; kde = ed2
330 CASE ( DATA_ORDER_YXZ)
331 ids = sd2 ; ide = ed2
332 jds = sd1 ; jde = ed1
333 kds = sd3 ; kde = ed3
336 CALL nl_get_max_dom( 1 , max_dom )
338 CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
339 IF ( id .GT. 1 ) THEN
340 CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
343 CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
344 ids, ide, jds, jde, kds, kde, &
345 ims, ime, jms, jme, kms, kme, &
346 imsx, imex, jmsx, jmex, kmsx, kmex, &
347 imsy, imey, jmsy, jmey, kmsy, kmey, &
348 ips, ipe, jps, jpe, kps, kpe, &
349 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
350 ipsy, ipey, jpsy, jpey, kpsy, kpey )
352 ! ensure that every parent domain point has a full set of nested points under it,
353 ! even at the borders, by making sure the number of nest points is a multiple of
354 ! the nesting ratio. Note that this matters mostly for the intermediate domain, which
355 ! is the subject of the scatter/gather comms with the parent.
357 IF ( id .GT. 1 ) THEN
358 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
359 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
360 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
363 SELECT CASE ( model_data_order )
364 CASE ( DATA_ORDER_ZXY )
365 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
366 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
367 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
368 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
369 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
370 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
371 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
372 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
373 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
374 CASE ( DATA_ORDER_ZYX )
375 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
376 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
377 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
378 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
379 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
380 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
381 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
382 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
383 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
384 CASE ( DATA_ORDER_XYZ )
385 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
386 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
387 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
388 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
389 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
390 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
391 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
392 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
393 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
394 CASE ( DATA_ORDER_YXZ)
395 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
396 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
397 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
398 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
399 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
400 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
401 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
402 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
403 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
404 CASE ( DATA_ORDER_XZY )
405 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
406 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
407 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
408 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
409 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
410 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
411 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
412 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
413 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
414 CASE ( DATA_ORDER_YZX )
415 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
416 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
417 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
418 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
419 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
420 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
421 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
422 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
423 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
427 WRITE(wrf_err_message,*)'*************************************'
428 CALL wrf_message( TRIM(wrf_err_message) )
429 WRITE(wrf_err_message,*)'Parent domain'
430 CALL wrf_message( TRIM(wrf_err_message) )
431 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
432 CALL wrf_message( TRIM(wrf_err_message) )
433 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
434 CALL wrf_message( TRIM(wrf_err_message) )
435 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
436 CALL wrf_message( TRIM(wrf_err_message) )
437 WRITE(wrf_err_message,*)'*************************************'
438 CALL wrf_message( TRIM(wrf_err_message) )
441 IF ( id .GT. 1 ) THEN
443 CALL nl_get_shw( id, shw )
444 CALL nl_get_i_parent_start( id , i_parent_start )
445 CALL nl_get_j_parent_start( id , j_parent_start )
446 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
448 SELECT CASE ( model_data_order )
449 CASE ( DATA_ORDER_ZXY )
453 c_kds = sd1 ; c_kde = ed1
454 CASE ( DATA_ORDER_ZYX )
458 c_kds = sd1 ; c_kde = ed1
459 CASE ( DATA_ORDER_XYZ )
463 c_kds = sd3 ; c_kde = ed3
464 CASE ( DATA_ORDER_YXZ)
468 c_kds = sd3 ; c_kde = ed3
469 CASE ( DATA_ORDER_XZY )
473 c_kds = sd2 ; c_kde = ed2
474 CASE ( DATA_ORDER_YZX )
478 c_kds = sd2 ; c_kde = ed2
481 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
482 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
484 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
485 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
488 call nl_get_e_we( id -1, e_we )
489 call nl_get_e_sn( id -1, e_sn )
491 if ( c_ids .le. 0 ) c_ids = 1
492 if ( c_ide .gt. e_we) c_ide = e_we
493 if ( c_jds .le. 0 ) c_jds = 1
494 if ( c_jde .gt. e_sn) c_jde = e_sn
496 ! we want the intermediate domain to be decomposed
497 ! the same as the underlying nest. So try this:
500 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
503 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
504 !jm CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
505 CALL task_for_point ( ni, nj, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id),Px,Py, &
507 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (a)')
508 IF ( Px .EQ. mytask_x ) THEN
510 IF ( c_ips .EQ. -1 ) c_ips = i
513 IF ( ierr .NE. 0 ) THEN
514 CALL tfp_message("<stdin>",__LINE__)
516 IF (c_ips .EQ. -1 ) THEN
522 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
525 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
526 ! CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
527 CALL task_for_point ( ni, nj, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
529 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (b)')
532 IF ( Py .EQ. mytask_y ) THEN
534 IF ( c_jps .EQ. -1 ) c_jps = j
537 IF ( ierr .NE. 0 ) THEN
538 CALL tfp_message("<stdin>",__LINE__)
540 IF (c_jps .EQ. -1 ) THEN
546 IF (c_ipe .EQ. -1 .or. c_jpe .EQ. -1) THEN
555 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
558 ! CALL task_for_point ( k, nj, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
559 CALL task_for_point ( k, nj, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
561 IF ( Px .EQ. mytask_x ) THEN
563 IF ( c_kpsx .EQ. -1 ) c_kpsx = k
566 IF ( ierr .NE. 0 ) THEN
567 CALL tfp_message("<stdin>",__LINE__)
569 IF (c_kpsx .EQ. -1 ) THEN
578 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
579 ! CALL task_for_point ( k, nj, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
580 CALL task_for_point ( k, nj, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
582 IF ( Py .EQ. mytask_y ) THEN
584 IF ( c_jpsx .EQ. -1 ) c_jpsx = j
587 IF ( ierr .NE. 0 ) THEN
588 CALL tfp_message("<stdin>",__LINE__)
590 IF (c_jpsx .EQ. -1 ) THEN
595 IF (c_ipex .EQ. -1 .or. c_jpex .EQ. -1) THEN
602 c_kpsy = c_kpsx ! same as above
603 c_kpey = c_kpex ! same as above
609 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
610 ! CALL task_for_point ( ni, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
611 CALL task_for_point ( ni, k, ids, ide, kds, kde, nest_pes_y(id), nest_pes_x(id), Py, Px, &
612 1, 1, ierr ) ! x and y for proc mesh reversed
613 IF ( Py .EQ. mytask_y ) THEN
615 IF ( c_ipsy .EQ. -1 ) c_ipsy = i
618 IF ( ierr .NE. 0 ) THEN
619 CALL tfp_message("<stdin>",__LINE__)
621 IF (c_ipsy .EQ. -1 ) THEN
628 IF ( c_ips <= c_ipe ) THEN
629 ! extend the patch dimensions out shw along edges of domain
630 IF ( mytask_x .EQ. 0 ) THEN
633 c_ipsy = c_ipsy - shw
636 ! IF ( mytask_x .EQ. ntasks_x-1 ) THEN
637 IF ( mytask_x .EQ. nest_pes_x(id)-1 ) THEN
640 c_ipey = c_ipey + shw
643 c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
644 c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
652 IF ( c_jps <= c_jpe ) THEN
653 ! extend the patch dimensions out shw along edges of domain
654 IF ( mytask_y .EQ. 0 ) THEN
657 c_jpsx = c_jpsx - shw
660 ! IF ( mytask_y .EQ. ntasks_y-1 ) THEN
661 IF ( mytask_y .EQ. nest_pes_y(id)-1 ) THEN
664 c_jpex = c_jpex + shw
667 c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdx ) - 1
668 c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdx ) + 1
679 ! Default initializations
680 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
681 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
689 IF ( c_kpsx .EQ. 0 .AND. c_kpex .EQ. -1 ) THEN
693 IF ( c_kpsy .EQ. 0 .AND. c_kpey .EQ. -1 ) THEN
702 IF ( c_ipsy .EQ. 0 .AND. c_ipey .EQ. -1 ) THEN
715 IF ( c_jpsx .EQ. 0 .AND. c_jpex .EQ. -1 ) THEN
752 WRITE(wrf_err_message,*)'*************************************'
753 CALL wrf_message( TRIM(wrf_err_message) )
754 WRITE(wrf_err_message,*)'Nesting domain'
755 CALL wrf_message( TRIM(wrf_err_message) )
756 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
757 CALL wrf_message( TRIM(wrf_err_message) )
758 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
759 CALL wrf_message( TRIM(wrf_err_message) )
760 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
761 CALL wrf_message( TRIM(wrf_err_message) )
762 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
763 CALL wrf_message( TRIM(wrf_err_message) )
764 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
765 CALL wrf_message( TRIM(wrf_err_message) )
766 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
767 CALL wrf_message( TRIM(wrf_err_message) )
768 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
769 CALL wrf_message( TRIM(wrf_err_message) )
770 WRITE(wrf_err_message,*)'*************************************'
771 CALL wrf_message( TRIM(wrf_err_message) )
773 SELECT CASE ( model_data_order )
774 CASE ( DATA_ORDER_ZXY )
775 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
776 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
777 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
778 CASE ( DATA_ORDER_ZYX )
779 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
780 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
781 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
782 CASE ( DATA_ORDER_XYZ )
783 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
784 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
785 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
786 CASE ( DATA_ORDER_YXZ)
787 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
788 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
789 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
790 CASE ( DATA_ORDER_XZY )
791 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
792 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
793 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
794 CASE ( DATA_ORDER_YZX )
795 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
796 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
797 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
800 ALLOCATE ( intermediate_grid )
801 ALLOCATE ( intermediate_grid%parents( max_parents ) )
802 ALLOCATE ( intermediate_grid%nests( max_nests ) )
803 intermediate_grid%allocated=.false.
804 NULLIFY( intermediate_grid%sibling )
806 NULLIFY( intermediate_grid%nests(i)%ptr )
808 NULLIFY (intermediate_grid%next)
809 NULLIFY (intermediate_grid%same_level)
810 NULLIFY (intermediate_grid%i_start)
811 NULLIFY (intermediate_grid%j_start)
812 NULLIFY (intermediate_grid%i_end)
813 NULLIFY (intermediate_grid%j_end)
814 intermediate_grid%id = id ! these must be the same. Other parts of code depend on it (see gen_comms.c)
815 intermediate_grid%num_nests = 0
816 intermediate_grid%num_siblings = 0
817 intermediate_grid%num_parents = 1
818 intermediate_grid%max_tiles = 0
819 intermediate_grid%num_tiles_spec = 0
820 #if ( EM_CORE == 1 && DA_CORE != 1 )
821 intermediate_grid%active_this_task = .true.
823 CALL find_grid_by_id ( id, head_grid, nest_grid )
825 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
826 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
827 intermediate_grid%num_parents = 1
829 intermediate_grid%is_intermediate = .TRUE.
830 SELECT CASE ( model_data_order )
831 CASE ( DATA_ORDER_ZXY )
832 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
833 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd33
834 CASE ( DATA_ORDER_ZYX )
835 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
836 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd32
837 CASE ( DATA_ORDER_XYZ )
838 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
839 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd32
840 CASE ( DATA_ORDER_YXZ)
841 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
842 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd31
843 CASE ( DATA_ORDER_XZY )
844 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
845 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd33
846 CASE ( DATA_ORDER_YZX )
847 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
848 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd31
850 intermediate_grid%nids = ids
851 intermediate_grid%nide = ide
852 intermediate_grid%njds = jds
853 intermediate_grid%njde = jde
855 intermediate_grid%sm31x = c_sm1x
856 intermediate_grid%em31x = c_em1x
857 intermediate_grid%sm32x = c_sm2x
858 intermediate_grid%em32x = c_em2x
859 intermediate_grid%sm33x = c_sm3x
860 intermediate_grid%em33x = c_em3x
861 intermediate_grid%sm31y = c_sm1y
862 intermediate_grid%em31y = c_em1y
863 intermediate_grid%sm32y = c_sm2y
864 intermediate_grid%em32y = c_em2y
865 intermediate_grid%sm33y = c_sm3y
866 intermediate_grid%em33y = c_em3y
869 intermediate_grid%sp31x = c_sp1x
870 intermediate_grid%ep31x = c_ep1x
871 intermediate_grid%sp32x = c_sp2x
872 intermediate_grid%ep32x = c_ep2x
873 intermediate_grid%sp33x = c_sp3x
874 intermediate_grid%ep33x = c_ep3x
875 intermediate_grid%sp31y = c_sp1y
876 intermediate_grid%ep31y = c_ep1y
877 intermediate_grid%sp32y = c_sp2y
878 intermediate_grid%ep32y = c_ep2y
879 intermediate_grid%sp33y = c_sp3y
880 intermediate_grid%ep33y = c_ep3y
883 #if ( ( defined(SGIALTIX) || defined(FUJITSU_FX10) || defined(KEEP_INT_AROUND) ) && (! defined(MOVE_NESTS) ) )
884 ! allocate space for the intermediate domain
885 ! CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., intercomm_active( intermediate_grid%id ), & ! use same id as nest
886 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., nest_grid%active_this_task, & ! use same id as nest
887 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
888 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
889 c_sp1, c_ep1, c_sp2, c_ep2, c_sp3, c_ep3, &
890 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
891 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
892 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
893 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
895 intermediate_grid%sd31 = c_sd1
896 intermediate_grid%ed31 = c_ed1
897 intermediate_grid%sp31 = c_sp1
898 intermediate_grid%ep31 = c_ep1
899 intermediate_grid%sm31 = c_sm1
900 intermediate_grid%em31 = c_em1
901 intermediate_grid%sd32 = c_sd2
902 intermediate_grid%ed32 = c_ed2
903 intermediate_grid%sp32 = c_sp2
904 intermediate_grid%ep32 = c_ep2
905 intermediate_grid%sm32 = c_sm2
906 intermediate_grid%em32 = c_em2
907 intermediate_grid%sd33 = c_sd3
908 intermediate_grid%ed33 = c_ed3
909 intermediate_grid%sp33 = c_sp3
910 intermediate_grid%ep33 = c_ep3
911 intermediate_grid%sm33 = c_sm3
912 intermediate_grid%em33 = c_em3
914 CALL med_add_config_info_to_grid ( intermediate_grid )
916 intermediate_grid%dx = parent%dx
917 intermediate_grid%dy = parent%dy
918 intermediate_grid%dt = parent%dt
922 END SUBROUTINE patch_domain_rsl_lite
924 SUBROUTINE compute_memory_dims_rsl_lite ( &
925 id , maxhalowidth , &
927 ids, ide, jds, jde, kds, kde, &
928 ims, ime, jms, jme, kms, kme, &
929 imsx, imex, jmsx, jmex, kmsx, kmex, &
930 imsy, imey, jmsy, jmey, kmsy, kmey, &
931 ips, ipe, jps, jpe, kps, kpe, &
932 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
933 ipsy, ipey, jpsy, jpey, kpsy, kpey )
936 INTEGER, INTENT(IN) :: id , maxhalowidth
937 INTEGER, INTENT(IN) :: shw, bdx, bdy
938 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
939 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
940 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
941 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
942 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
943 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
944 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
946 INTEGER Px, Py, P, i, j, k, ierr
954 ! CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
955 CALL task_for_point ( i, j, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
957 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (c)')
958 IF ( Px .EQ. mytask_x ) THEN
960 IF ( ips .EQ. -1 ) ips = i
963 IF ( ierr .NE. 0 ) THEN
964 CALL tfp_message("<stdin>",__LINE__)
966 ! handle setting the memory dimensions where there are no X elements assigned to this proc
967 IF (ips .EQ. -1 ) THEN
976 ! CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
977 CALL task_for_point ( i, j, ids, ide, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
979 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (d)')
980 IF ( Py .EQ. mytask_y ) THEN
982 IF ( jps .EQ. -1 ) jps = j
985 IF ( ierr .NE. 0 ) THEN
986 CALL tfp_message("<stdin>",__LINE__)
988 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
989 IF (jps .EQ. -1 ) THEN
994 !begin: wig; 12-Mar-2008
995 ! This appears redundant with the conditionals above, but we get cases with only
996 ! one of the directions being set to "missing" when turning off extra processors.
997 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
998 IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
1004 !end: wig; 12-Mar-2008
1007 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
1009 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
1010 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
1011 ! corresponds to grid dimension.
1015 ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
1018 ! +------------------+ <- this edge is costly; see below
1020 ! The aim is to avoid all-to-all communication over whole
1021 ! communicator. Instead, when possible, use a transpose scheme that requires
1022 ! all-to-all within dimensional communicators; that is, communicators
1023 ! defined for the processes in a row or column of the processor mesh. Note,
1024 ! however, it is not possible to create a ring of transposes between
1025 ! xy-yz-xz decompositions without at least one of the edges in the ring
1026 ! being fully all-to-all (in other words, one of the transpose edges must
1027 ! rotate and not just transpose a plane of the model grid within the
1028 ! processor mesh). The issue is then, where should we put this costly edge
1029 ! in the transpose scheme we chose? To avoid being completely arbitrary,
1030 ! we chose a scheme most natural for models that use parallel spectral
1031 ! transforms, where the costly edge is the one that goes from the xz to
1032 ! the xy decomposition. (May be implemented as just a two step transpose
1035 ! Additional notational convention, below. The 'x' or 'y' appended to the
1036 ! dimension start or end variable refers to which grid dimension is all
1037 ! on-processor in the given decomposition. That is ipsx and ipex are the
1038 ! start and end for the i-dimension in the zy decomposition where x is
1039 ! on-processor. ('z' is assumed for xy decomposition and not appended to
1040 ! the ips, ipe, etc. variable names).
1043 ! XzYy decomposition
1049 ! CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
1050 CALL task_for_point ( k, j, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
1052 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (e)')
1053 IF ( Px .EQ. mytask_x ) THEN
1055 IF ( kpsx .EQ. -1 ) kpsx = k
1058 IF ( ierr .NE. 0 ) THEN
1059 CALL tfp_message("<stdin>",__LINE__)
1062 ! handle the case where no levels are assigned to this process, i.e. the loop above did
1063 ! no iterations. Do the same for I and J. The memory allocation is handled below.
1064 IF (kpsx .EQ. -1 ) THEN
1073 ! CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
1074 CALL task_for_point ( k, j, kds, kde, jds, jde, nest_pes_x(id), nest_pes_y(id), Px, Py, &
1076 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (f)')
1077 IF ( Py .EQ. mytask_y ) THEN
1079 IF ( jpsx .EQ. -1 ) jpsx = j
1082 IF ( ierr .NE. 0 ) THEN
1083 CALL tfp_message("<stdin>",__LINE__)
1085 IF (jpsx .EQ. -1 ) THEN
1090 !begin: wig; 12-Mar-2008
1091 ! This appears redundant with the conditionals above, but we get cases with only
1092 ! one of the directions being set to "missing" when turning off extra processors.
1093 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
1094 IF (jpex .EQ. -1) THEN
1100 !end: wig; 12-Mar-2008
1102 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
1104 kpsy = kpsx ! same as above
1105 kpey = kpex ! same as above
1111 ! CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
1112 CALL task_for_point ( i, k, ids, ide, kds, kde, nest_pes_y(id), nest_pes_x(id), Py, Px, &
1114 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (g)')
1115 IF ( Py .EQ. mytask_y ) THEN
1117 IF ( ipsy .EQ. -1 ) ipsy = i
1120 IF ( ierr .NE. 0 ) THEN
1121 CALL tfp_message("<stdin>",__LINE__)
1123 IF (ipsy .EQ. -1 ) THEN
1129 ! extend the patch dimensions out shw along edges of domain
1130 IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
1131 IF ( mytask_x .EQ. 0 ) THEN
1135 ! IF ( mytask_x .EQ. ntasks_x-1 ) THEN
1136 IF ( mytask_x .EQ. nest_pes_x(id)-1 ) THEN
1140 IF ( mytask_y .EQ. 0 ) THEN
1144 ! IF ( mytask_y .EQ. ntasks_y-1 ) THEN
1145 IF ( mytask_y .EQ. nest_pes_y(id)-1 ) THEN
1149 END IF !wig; 11-Mar-2008
1161 ! handle setting the memory dimensions where there are no levels assigned to this proc
1162 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
1166 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
1171 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
1175 ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
1176 ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
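! Worked example with illustrative numbers (not taken from any run): for
! ips=26, ipe=50, shw=1, maxhalowidth=6, ids=1, ide=100, bdx=1 the two lines above give
!   ims = max( 26-6, 1-1 ) - 1 = 19   and   ime = min( 50+6, 100+1 ) + 1 = 57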
1177 #ifdef INTEL_ALIGN64
1178 ! align on 64 byte boundaries if -align array64byte
1180 ime = ime + (CHUNK-mod(ime-ims+1,CHUNK))
1187 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
1188 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
1196 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
1200 jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
1201 jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
1207 ! handle setting the memory dimensions where there are no X elements assigned to this proc
1208 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
1218 END SUBROUTINE compute_memory_dims_rsl_lite
1220 ! internal, used below to select the MPI datatype argument for MPI calls
1221 ! when reals are autopromoted to doubles in the WRF build
1222 INTEGER function getrealmpitype()
1225 INTEGER rtypesize, dtypesize, ierr
1226 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
1227 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
1228 IF ( RWORDSIZE .EQ. rtypesize ) THEN
1229 getrealmpitype = MPI_REAL
1230 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
1231 getrealmpitype = MPI_DOUBLE_PRECISION
1233 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
1236 ! required dummy initialization for function that is never called
1240 END FUNCTION getrealmpitype
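! getrealmpitype() is used by the reduction wrappers below (e.g. wrf_dm_max_real,
! wrf_dm_sum_real) so that the MPI datatype matches WRF's default REAL kind:
! MPI_REAL in a single-precision build, MPI_DOUBLE_PRECISION when reals are
! promoted to doubles (RWORDSIZE equal to DWORDSIZE).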
1242 INTEGER FUNCTION wrf_dm_max_int ( inval )
1245 INTEGER, intent(in) :: inval
1246 INTEGER :: ierr, retval
1247 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_MAX, local_communicator, ierr )
1248 wrf_dm_max_int = retval
1250 INTEGER, intent(in) :: inval
1251 wrf_dm_max_int = inval
1253 END FUNCTION wrf_dm_max_int
1255 REAL FUNCTION wrf_dm_max_real ( inval )
1260 CALL wrf_get_dm_communicator(comm)
1261 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, comm, ierr )
1262 wrf_dm_max_real = retval
1265 wrf_dm_max_real = inval
1267 END FUNCTION wrf_dm_max_real
1269 REAL FUNCTION wrf_dm_min_real ( inval )
1274 CALL wrf_get_dm_communicator(comm)
1275 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, comm, ierr )
1276 wrf_dm_min_real = retval
1279 wrf_dm_min_real = inval
1281 END FUNCTION wrf_dm_min_real
1283 SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
1290 CALL wrf_get_dm_communicator(comm)
1291 CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, comm, ierr )
1293 retval(1:n) = inval(1:n)
1295 END SUBROUTINE wrf_dm_min_reals
1297 FUNCTION wrf_dm_sum_real8 ( inval )
1300 REAL*8 inval, retval, wrf_dm_sum_real8
1302 CALL wrf_get_dm_communicator(comm)
1303 CALL mpi_allreduce ( inval, retval , 1, MPI_REAL8, MPI_SUM, comm, ierr )
1304 wrf_dm_sum_real8 = retval
1306 REAL*8 wrf_dm_sum_real8,inval
1307 wrf_dm_sum_real8 = inval
1309 END FUNCTION wrf_dm_sum_real8
1311 REAL FUNCTION wrf_dm_sum_real ( inval )
1316 CALL wrf_get_dm_communicator(comm)
1317 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, comm, ierr )
1318 wrf_dm_sum_real = retval
1321 wrf_dm_sum_real = inval
1323 END FUNCTION wrf_dm_sum_real
1325 SUBROUTINE wrf_dm_sum_reals (inval, retval)
1327 REAL, INTENT(IN) :: inval(:)
1328 REAL, INTENT(OUT) :: retval(:)
1331 CALL wrf_get_dm_communicator(comm)
1332 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, comm, ierr )
1336 END SUBROUTINE wrf_dm_sum_reals
1338 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
1341 INTEGER inval, retval
1343 CALL wrf_get_dm_communicator(comm)
1344 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, comm, ierr )
1345 wrf_dm_sum_integer = retval
1348 wrf_dm_sum_integer = inval
1350 END FUNCTION wrf_dm_sum_integer
1352 SUBROUTINE wrf_dm_sum_integers (inval, retval)
1354 INTEGER, INTENT(IN) :: inval(:)
1355 INTEGER, INTENT(OUT) :: retval(:)
1358 CALL wrf_get_dm_communicator(comm)
1359 CALL mpi_allreduce ( inval, retval, SIZE(inval), MPI_INTEGER, MPI_SUM, comm, ierr )
1363 END SUBROUTINE wrf_dm_sum_integers
1366 INTEGER FUNCTION wrf_dm_bxor_integer ( inval )
1369 INTEGER inval, retval
1371 CALL wrf_get_dm_communicator(comm)
1372 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_BXOR, comm, ierr )
1373 wrf_dm_bxor_integer = retval
1376 wrf_dm_bxor_integer = inval
1378 END FUNCTION wrf_dm_bxor_integer
1381 LOGICAL FUNCTION wrf_dm_lor_logical ( inval )
1384 LOGICAL inval, retval
1386 CALL wrf_get_dm_communicator(comm)
1387 CALL mpi_allreduce ( inval, retval , 1, MPI_LOGICAL, MPI_LOR, comm, ierr )
1388 wrf_dm_lor_logical = retval
1391 wrf_dm_lor_logical = inval
1393 END FUNCTION wrf_dm_lor_logical
1396 LOGICAL FUNCTION wrf_dm_land_logical ( inval )
1399 LOGICAL inval, retval
1401 CALL wrf_get_dm_communicator(comm)
1402 CALL mpi_allreduce ( inval, retval , 1, MPI_LOGICAL, MPI_LAND, comm, ierr )
1403 wrf_dm_land_logical = retval
1406 wrf_dm_land_logical = inval
1408 END FUNCTION wrf_dm_land_logical
1411 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
1415 INTEGER :: idex, jdex, i, comm
1416 INTEGER :: bcast(2),mrank
1417 REAL :: inreduce(2),outreduce(2)
1419 inreduce=(/ val, real(mytask) /)
1420 bcast=(/ idex,jdex /)
1421 CALL wrf_get_dm_communicator(comm)
1422 call MPI_Allreduce(inreduce,outreduce,1,MPI_2REAL,&
1426 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i) ! bcast holds the integer indices idex,jdex
1432 INTEGER idex, jdex, ierr
1434 END SUBROUTINE wrf_dm_maxval_real
1436 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
1440 INTEGER :: idex, jdex, i, comm
1441 INTEGER :: bcast(2),mrank
1442 REAL :: inreduce(2),outreduce(2)
1444 inreduce=(/ val, real(mytask) /)
1445 bcast=(/ idex,jdex /)
1446 CALL wrf_get_dm_communicator(comm)
1447 call MPI_Allreduce(inreduce,outreduce,1,MPI_2REAL,&
1451 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i) ! bcast holds the integer indices idex,jdex
1459 END SUBROUTINE wrf_dm_minval_real
1461 #ifndef PROMOTE_FLOAT
1462 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
1465 DOUBLE PRECISION val
1466 INTEGER :: idex, jdex, i, comm
1467 INTEGER :: bcast(2),mrank
1468 DOUBLE PRECISION :: inreduce(2),outreduce(2)
1470 inreduce=(/ val, dble(mytask) /)
1471 bcast=(/ idex,jdex /)
1472 CALL wrf_get_dm_communicator(comm)
1473 call MPI_Allreduce(inreduce,outreduce,1,MPI_2DOUBLE_PRECISION,&
1477 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i) ! bcast holds the integer indices idex,jdex
1482 DOUBLE PRECISION val
1483 INTEGER idex, jdex, ierr
1485 END SUBROUTINE wrf_dm_maxval_doubleprecision
1487 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1490 DOUBLE PRECISION val
1491 INTEGER :: idex, jdex, i, comm
1492 INTEGER :: bcast(2),mrank
1493 DOUBLE PRECISION :: inreduce(2),outreduce(2)
1495 inreduce=(/ val, dble(mytask) /)
1496 bcast=(/ idex,jdex /)
1497 CALL wrf_get_dm_communicator(comm)
1498 call MPI_Allreduce(inreduce,outreduce,1,MPI_2DOUBLE_PRECISION,&
1502 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i) ! bcast holds the integer indices idex,jdex
1507 DOUBLE PRECISION val
1508 INTEGER idex, jdex, ierr
1510 END SUBROUTINE wrf_dm_minval_doubleprecision
1513 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
1517 INTEGER :: idex, jdex, i, comm
1518 INTEGER :: bcast(2),mrank
1519 INTEGER :: inreduce(2),outreduce(2)
1521 inreduce=(/ val, mytask /)
1522 bcast=(/ idex,jdex /)
1523 CALL wrf_get_dm_communicator(comm)
1524 call MPI_Allreduce(inreduce,outreduce,1,MPI_2INTEGER,&
1528 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)
1534 INTEGER idex, jdex, ierr
1536 END SUBROUTINE wrf_dm_maxval_integer
1538 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1542 INTEGER :: idex, jdex, i, comm
1543 INTEGER :: bcast(2),mrank
1544 INTEGER :: inreduce(2),outreduce(2)
1546 inreduce=(/ val, mytask /)
1547 bcast=(/ idex,jdex /)
1548 CALL wrf_get_dm_communicator(comm)
1549 call MPI_Allreduce(inreduce,outreduce,1,MPI_2INTEGER,&
1553 call MPI_Bcast(bcast,2,MPI_INTEGER,mrank,comm,i)
1559 INTEGER idex, jdex, ierr
1561 END SUBROUTINE wrf_dm_minval_integer
1563 SUBROUTINE hwrf_coupler_init
1564 END SUBROUTINE hwrf_coupler_init
1566 SUBROUTINE split_communicator
1570 ! INTEGER mpi_comm_here, mpi_comm_local, comdup, comdup2, origmytask, mytask, ntasks, ierr, io_status
1571 INTEGER mpi_comm_here, mpi_comm_local, comdup, comdup2, origmytask, ierr, io_status
1572 INTEGER mpi_comm_me_and_mom
1574 INTEGER mytask_local,ntasks_local,num_compute_tasks
1575 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1576 INTEGER thread_support_provided, thread_support_requested
1578 INTEGER i, j, k, x, y, n_x, n_y
1580 INTEGER, ALLOCATABLE :: icolor(:),icolor2(:),idomain(:)
1583 ! Communicator definition Domains
1585 ! 6-PE example:  COMM_WORLD tasks  0 1 2 3 4 5      domain tree:   (1)
1586 !                Comm 1     PEs    0 1 2 3 4 5                   (2) (3)
1591 ! Notes: 1. There is no requirement that any communicator include all tasks
1592 ! 2. A task may be a member of an arbitrary number
1593 ! of local communicators (But you may not want to do this)
1596 ! Namelist Split Settings (for 3 comms, 4 domains)
1597 ! Revised namelist semantics -- no need for binding nests to separately defined communicators
1599 ! (domain_id) 1 2 3 4
1601 ! comm_start 0 0 2 0
1602 ! nest_pes_x 2 1 2 1
1603 ! nest_pes_y 3 2 2 2
1606 !! Namelist Split Settings (for 3 comms, 4 domains)
1607 !! (comm_id) 1 2 3 ...
1612 !! Domain definitions
1613 !! (domain_id) 1 2 3 4
1614 !! parent_id - 1 1 2
1615 !! comm_domain 1 2 3 2
1616 !! * nest_pes_x 2 1 2 1
1617 !! * nest_pes_y 3 2 2 2
1619 !! [* nest_pes_x is comm_pes_x(comm_domain(domain_id))]
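! A hypothetical namelist.input stanza matching the revised-semantics table above
! (values are illustrative only; tasks_per_split is left at its default):
!
!   &dm_task_split
!    comm_start = 0, 0, 2, 0,
!    nest_pes_x = 2, 1, 2, 1,
!    nest_pes_y = 3, 2, 2, 2,
!   /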
1623 ! for parallel nesting, 201408, jm
1625 INTEGER :: intercomm
1626 INTEGER :: domain_id,par_id,nest_id,kid_id
1627 INTEGER :: mytask_me_and_mom, ntasks_me_and_mom, remote_leader
1628 LOGICAL :: inthisone
1629 LOGICAL :: mytask_is_nest, mytask_is_par,isperiodic(3)
1631 LOGICAL :: quilting_is_turned_off
1633 !!!!! needed to sneak-peek the registry to get parent_id
1634 ! define as temporaries
1635 #include "namelist_defines.inc"
1637 ! Statements that specify the namelists
1638 #include "namelist_statements.inc"
1640 CALL MPI_INITIALIZED( mpi_inited, ierr )
1641 IF ( .NOT. mpi_inited ) THEN
1642 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1643 thread_support_requested = MPI_THREAD_FUNNELED
1644 CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
1645 IF ( thread_support_provided .lt. thread_support_requested ) THEN
1646 CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
1648 mpi_comm_here = MPI_COMM_WORLD
1650 #if ( DA_CORE != 1 )
1651 IF ( coupler_on ) THEN
1652 CALL cpl_init( mpi_comm_here )
1655 CALL mpi_init ( ierr )
1656 mpi_comm_here = MPI_COMM_WORLD
1657 #if ( DA_CORE != 1 )
1661 CALL wrf_set_dm_communicator( mpi_comm_here )
1662 CALL wrf_termio_dup( mpi_comm_here )
1665 CALL wrf_set_dm_communicator( local_communicator )
1668 ! this should have been reset by init_module_wrf_quilt to be just the compute tasks
1669 CALL wrf_get_dm_communicator( mpi_comm_here )
1671 CALL MPI_Comm_rank ( mpi_comm_here, mytask_local, ierr ) ;
1672 CALL MPI_Comm_size ( mpi_comm_here, ntasks_local, ierr ) ;
1673 mpi_comm_allcompute = mpi_comm_here
1675 IF ( mytask_local .EQ. 0 ) THEN
1677 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1678 READ ( UNIT = 27 , NML = domains , IOSTAT=io_status )
1681 nio_tasks_per_group = 0
1682 poll_servers = .false.
1683 READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
1686 CALL mpi_bcast( nio_tasks_per_group , max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1687 CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1688 CALL mpi_bcast( max_dom, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1689 CALL mpi_bcast( parent_id, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1690 CALL quilting_disabled( quilting_is_turned_off )
1691 IF ( quilting_is_turned_off ) THEN
1693 nio_tasks_per_group = 0
1696 num_io_tasks = nio_tasks_per_group(1)*nio_groups
1698 CALL nl_set_max_dom(1,max_dom) ! quilting wants to see this too
1700 IF ( mytask_local .EQ. 0 ) THEN
1701 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1702 ! get a sneak peek at nproc_x and nproc_y
1705 READ ( 27 , NML = domains, IOSTAT=io_status )
1707 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1708 tasks_per_split = ntasks_local
1709 ! we need to sneak-peek the parent_id namelist setting, which is in the "domains" section
1710 ! of the namelist. That namelist is registry generated, so the registry-generated information
1711 ! is #included above.
1712 nest_pes_x = 0 ! dimensions of the communicator in X and Y
1714 IF ( nproc_x .EQ. -1 .OR. nproc_y .EQ. -1 ) THEN
1715 CALL compute_mesh( ntasks_local-num_io_tasks, n_x, n_y )
1720 comm_start = 0 ! make it so everyone will use the same communicator if the dm_task_split namelist is not specified or is empty
1721 nest_pes_x(1:max_dom) = n_x
1722 nest_pes_y(1:max_dom) = n_y
1723 READ ( 27 , NML = dm_task_split, IOSTAT=io_status )
1726 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1727 IF ( io_status .NE. 0 ) THEN
1728 ! or if dm_task_split was read but was empty, do nothing: dm_task_split not specified, everyone uses the same communicator (see above)
1730 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1731 CALL mpi_bcast( nproc_x, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1732 CALL mpi_bcast( nproc_y, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1733 CALL mpi_bcast( comm_start, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1734 CALL mpi_bcast( nest_pes_x, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1735 CALL mpi_bcast( nest_pes_y, max_domains , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1740 IF ( 1 .le. parent_id(i) .AND. parent_id(i) .LE. max_domains ) THEN
1741 which_kid(i) = nkids(parent_id(i))
1742 nkids(parent_id(i)) = nkids(parent_id(i)) + 1
1744 WRITE(wrf_err_message,*)'invalid parent id for domain ',i
1745 CALL wrf_error_fatal(TRIM(wrf_err_message))
1749 num_compute_tasks = -99
1750 DO nest_id = 1,max_dom
1751 IF ( nest_id .EQ. 1 ) THEN
1752 nest_task_offsets(nest_id) = comm_start(nest_id)
1754 IF ( comm_start(nest_id) .LT. comm_start(parent_id(nest_id)) ) THEN
1755 WRITE(wrf_err_message,&
1756 "('nest domain ',i3,' comm_start (',i3,') lt parent ',i3,' comm_start (',i3,')')") &
1757 nest_id,comm_start(nest_id),parent_id(nest_id),comm_start(parent_id(nest_id))
1758 CALL wrf_error_fatal(TRIM(wrf_err_message))
1759 ELSE IF ( comm_start(nest_id) .LT. &
1760 comm_start(parent_id(nest_id)) &
1761 +nest_pes_x(parent_id(nest_id))*nest_pes_y(parent_id(nest_id))) THEN
1762 nest_task_offsets(nest_id) = comm_start(nest_id)-comm_start(parent_id(nest_id))
1764 nest_task_offsets(nest_id) = nest_pes_x(parent_id(nest_id))*nest_pes_y(parent_id(nest_id))
1767 IF ((comm_start(nest_id)+nest_pes_x(nest_id)*nest_pes_y(nest_id)) .GT. num_compute_tasks ) THEN
1768 num_compute_tasks = (comm_start(nest_id)+nest_pes_x(nest_id)*nest_pes_y(nest_id))
1773 !jm Additional code here to set up communicator for this domain and tables
1774 !jm mapping individual domain task IDs to the original local communicator
1775 !jm that is unsplit over nest domains. from now on what we are calling
1776 !jm local_communicator will be the communicator that is used by the local
1777 !jm nests. The communicator that spans all the nests will be renamed to
1778 !jm intercomm_communicator.
1779 !jm Design note: exploring the idea of using MPI intercommunicators. They
1780 !jm only work in pairs so we'd have a lot of intercommunicators to set up
1781 !jm and keep around. We'd also have to have additional communicator arguments
1782 !jm to all the nesting routines in and around the RSL nesting parts.
1783 CALL MPI_Comm_rank ( mpi_comm_here, mytask_local, ierr ) ;
1784 CALL MPI_Comm_rank ( mpi_comm_here, origmytask, ierr ) ;
1785 CALL mpi_comm_size ( mpi_comm_here, ntasks_local, ierr ) ;
1786 ALLOCATE( icolor(ntasks_local) )
1787 ALLOCATE( icolor2(ntasks_local) )
1788 ALLOCATE( idomain(ntasks_local) )
1790 ! split off the separate local communicators
1792 ! construct list of local communicators my task is in
1793 comms_i_am_in = MPI_UNDEFINED
1797 DO j = comm_start(i), comm_start(i)+nest_pes_x(i)*nest_pes_y(i)-1
1798 IF ( j+1 .GT. ntasks_local ) THEN
1799 WRITE(wrf_err_message,*)"check comm_start, nest_pes_x, nest_pes_y settings in namelist for comm ",i
1800 CALL wrf_error_fatal(wrf_err_message)
1804 IF ( icolor(mytask_local+1) .EQ. 1 ) inthisone = .TRUE.
1805 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1806 CALL MPI_Comm_split(comdup,icolor(mytask_local+1),mytask_local,mpi_comm_local,ierr)
1807 IF ( inthisone ) THEN
1808 dims(1) = nest_pes_y(i) ! rows
1809 dims(2) = nest_pes_x(i) ! columns
1810 isperiodic(1) = .false.
1811 isperiodic(2) = .false.
1812 CALL mpi_cart_create( mpi_comm_local, 2, dims, isperiodic, .false., comms_i_am_in(i), ierr )
1816 ! assign domains to communicators
1817 local_communicator = MPI_UNDEFINED
1818 CALL wrf_set_dm_quilt_comm( mpi_comm_here ) ! used by module_io_quilt_old.F
1820 local_communicator_store(i) = comms_i_am_in(i)
1821 domain_active_this_task(i) = ( local_communicator_store(i) .NE. MPI_UNDEFINED )
1822 IF ( local_communicator_store(i) .NE. MPI_UNDEFINED ) THEN
1823 CALL MPI_Comm_size( local_communicator_store(i), ntasks_store(i), ierr )
1824 CALL MPI_Comm_rank( local_communicator_store(i), mytask_store(i), ierr )
1825 CALL mpi_cart_coords( local_communicator_store(i), mytask_store(i), 2, coords, ierr )
1826 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_cart_coords fails ')
1827 mytask_y_store(i) = coords(1) ! row coordinate (y)
1828 mytask_x_store(i) = coords(2) ! column coordinate (x)
1829 CALL MPI_Comm_dup( local_communicator_store(i), comdup2, ierr )
1830 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails ')
1832 CALL MPI_Comm_split(comdup2,mytask_y_store(i),mytask_store(i),local_communicator_x_store(i),ierr)
1833 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y ')
1835 CALL MPI_Comm_split(comdup2,mytask_x_store(i),mytask_store(i),local_communicator_y_store(i),ierr)
1836 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x ')
1838 CALL MPI_Comm_size( local_communicator_x_store(i), ntasks_x_store(i), ierr )
1839 CALL MPI_Comm_rank( local_communicator_x_store(i), mytask_x_store(i), ierr )
1840 CALL MPI_Comm_size( local_communicator_y_store(i), ntasks_y_store(i), ierr )
1841 CALL MPI_Comm_rank( local_communicator_y_store(i), mytask_y_store(i), ierr )
1845 intercomm_active = .FALSE.
1846 ! iterate over parent-nest pairs
1847 ! split off a new communicator from the big one that includes the tasks from the parent and nest communicators
1848 ! starting with the parent tasks followed by the nest tasks
1849 ! if a task is in both (i.e. the communicators overlap) set the offset at the start of the first nest task
1850 ! in this way, we will handle cases where the parent and nest are decomposed over the same set of tasks
1851 ! (in that case, the offset would be the first task of the parent-nest communicator and that communicator)
1853 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1854 ntasks_local = num_compute_tasks
1855 DO nest_id = 2, max_dom
1856 par_id = parent_id(nest_id)
1858 DO j = 1,ntasks_local !iterate over all the tasks in the "big" communicator
1859 IF ( local_communicator_store( par_id ) .NE. MPI_UNDEFINED .OR. local_communicator_store( nest_id ) .NE. MPI_UNDEFINED ) icolor2(j)=1
1861 ! set mpi_comm_me_and_mom to be a communicator that has my parent's tasks and mine
1863 mytask_is_nest = .FALSE.
1864 mytask_is_par = .FALSE.
1865 DO j = 1,ntasks_local
1867 IF ( comm_start(nest_id) .LE. j-1 .AND. j-1 .LT. comm_start(nest_id) + nest_pes_x(nest_id)*nest_pes_y(nest_id) ) THEN
1869 if ( j-1 .EQ. mytask_local ) mytask_is_nest=.TRUE.
1871 IF ( comm_start(par_id ) .LE. j-1 .AND. j-1 .LT. comm_start(par_id ) + nest_pes_x(par_id )*nest_pes_y(par_id ) ) THEN
1873 if ( j-1 .EQ. mytask_local ) mytask_is_par=.TRUE.
1877 i = icolor2(mytask_local+1)
1878 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1879 CALL MPI_Comm_split(comdup,i,origmytask,mpi_comm_me_and_mom,ierr)
1881 IF ( mytask_is_nest ) THEN
1882 intercomm_active(nest_id) = .TRUE.
1883 mpi_comm_to_mom(nest_id) = mpi_comm_me_and_mom
1885 IF ( mytask_is_par ) THEN
1886 intercomm_active(par_id) = .TRUE.
1887 mpi_comm_to_kid(which_kid(nest_id),par_id) = mpi_comm_me_and_mom
1890 DEALLOCATE( icolor )
1891 DEALLOCATE( icolor2 )
1892 DEALLOCATE( idomain )
1894 ELSE IF ( ( tasks_per_split .LE. ntasks_local .AND. tasks_per_split .GT. 0 ) ) THEN ! tasks_per_split must be positive; it is used as a divisor below
1895 domain_active_this_task(1) = .TRUE.
1896 IF ( mod( ntasks_local, tasks_per_split ) .NE. 0 ) THEN
1897 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1900 ALLOCATE( icolor(ntasks_local) )
1902 DO WHILE ( j .LT. ntasks_local / tasks_per_split )
1903 DO i = 1, tasks_per_split
1904 icolor( i + j * tasks_per_split ) = j
1909 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1910 CALL MPI_Comm_split(comdup,icolor(mytask_local+1),mytask_local,mpi_comm_local,ierr)
1911 CALL wrf_set_dm_communicator( mpi_comm_local )
1912 CALL store_communicators_for_domain(1)
1913 DEALLOCATE( icolor )
1915 domain_active_this_task(1) = .TRUE.
1916 mpi_comm_local = mpi_comm_here
1917 CALL wrf_set_dm_communicator( mpi_comm_local )
1918 CALL store_communicators_for_domain(1)
1921 CALL instate_communicators_for_domain(1)
1924 ! for serial (non-MPI) builds
1926 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1927 INTEGER thread_support_provided, thread_support_requested
1929 INTEGER i, j, k, x, y, n_x, n_y
1932 ! for parallel nesting, 201408, jm
1934 INTEGER :: io_status
1935 INTEGER :: domain_id,par_id,nest_id,kid_id
1937 !!!!! needed to sneak-peek the registry to get parent_id
1938 ! define as temporaries
1939 #include "namelist_defines.inc"
1941 ! Statements that specify the namelists
1942 #include "namelist_statements.inc"
1945 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1946 READ ( UNIT = 27 , NML = domains , IOSTAT=io_status )
1952 IF ( 1 .le. parent_id(i) .AND. parent_id(i) .LE. max_domains ) THEN
1953 which_kid(i) = nkids(parent_id(i))
1954 nkids(parent_id(i)) = nkids(parent_id(i)) + 1
1956 WRITE(wrf_err_message,*)'invalid parent id for domain ',i
1957 CALL wrf_error_fatal(TRIM(wrf_err_message))
1961 intercomm_active = .TRUE.
1962 domain_active_this_task = .TRUE.
1983 CALL instate_communicators_for_domain(1)
1985 END SUBROUTINE split_communicator
1987 SUBROUTINE init_module_dm
1990 INTEGER mpi_comm_local, mpi_comm_here, ierr, mytask, nproc
1992 CALL mpi_initialized( mpi_inited, ierr )
1993 IF ( .NOT. mpi_inited ) THEN
1994 ! If MPI has not been initialized then initialize it and
1995 ! make comm_world the communicator
1996 ! Otherwise, something else (e.g. split_communicator) has already
1997 ! initialized MPI, so just grab the communicator that
1998 ! should already be stored and use that.
1999 CALL mpi_init ( ierr )
2000 mpi_comm_here = MPI_COMM_WORLD
2001 CALL wrf_set_dm_communicator ( mpi_comm_here )
2003 CALL wrf_get_dm_communicator( mpi_comm_local )
2005 END SUBROUTINE init_module_dm
2008 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
2009 USE module_domain, ONLY : domain
2011 TYPE (domain), INTENT(INOUT) :: parent, nest
2012 INTEGER, INTENT(IN) :: dx,dy
2014 END SUBROUTINE wrf_dm_move_nest
2016 !------------------------------------------------------------------------------
2017 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
2020 mp_local_cobmask, errf )
2022 !------------------------------------------------------------------------------
2023 ! PURPOSE: Do MPI allgatherv operation across processors to get the
2024 ! errors at each observation point on all processors.
2026 !------------------------------------------------------------------------------
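! The gather pattern used repeatedly below, shown schematically (a sketch only;
! the names nlocal, icount, idispl, locbuf, fullbuf stand in for the buffers
! declared below): each task allgathers its local count, builds displacements
! as a running sum of the counts, and then allgathervs its packed local values
! so that every task ends up with the full vector.
!
!   CALL MPI_ALLGATHER ( nlocal, 1, MPI_INTEGER, icount, 1, MPI_INTEGER, comm, ierr )
!   idispl(1) = 0
!   DO i = 2, nprocs
!     idispl(i) = idispl(i-1) + icount(i-1)
!   END DO
!   CALL MPI_ALLGATHERV( locbuf, nlocal, MPI_REAL, fullbuf, icount, idispl, &
!                        MPI_REAL, comm, ierr )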
2028 INTEGER, INTENT(IN) :: nsta ! Observation index.
2029 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
2030 INTEGER, INTENT(IN) :: niobf ! Number of observations.
2031 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
2032 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
2033 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
2034 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
2038 ! Local declarations
2039 integer i, n, nlocal_dot, nlocal_crs
2040 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
2041 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
2042 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
2043 REAL PBL_BUFFER(NIOBF) ! Buffer for holding (real) KPBL index
2044 REAL QATOB_BUFFER(NIOBF) ! Buffer for holding QV at the ob location
2045 INTEGER N_BUFFER(NIOBF)
2046 REAL FULL_BUFFER(NIOBF)
2047 INTEGER IFULL_BUFFER(NIOBF)
2048 INTEGER, ALLOCATABLE , DIMENSION(:) :: IDISPLACEMENT
2049 INTEGER, ALLOCATABLE , DIMENSION(:) :: ICOUNT
2051 INTEGER :: MPI_COMM_COMP ! MPI group communicator
2052 INTEGER :: NPROCS ! Number of processors
2053 INTEGER :: IERR ! Error code from MPI routines
2055 ! Get communicator for MPI operations.
2056 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
2058 ! Get rank of monitor processor and broadcast to others.
2059 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
2060 ALLOCATE (IDISPLACEMENT(NPROCS))
2061 ALLOCATE (ICOUNT(NPROCS))
2066 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
2067 NLOCAL_DOT = NLOCAL_DOT + 1
2068 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
2069 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
2070 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
2071 N_BUFFER(NLOCAL_DOT) = N
2074 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
2075 ICOUNT,1,MPI_INTEGER, &
2079 IDISPLACEMENT(1) = 0
2081 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2083 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
2084 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2085 MPI_INTEGER, MPI_COMM_COMP, IERR)
2087 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
2088 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2089 MPI_REAL, MPI_COMM_COMP, IERR)
2091 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2093 ! SURF PRESS AT U-POINTS
2094 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
2095 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2096 MPI_REAL, MPI_COMM_COMP, IERR)
2098 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2101 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
2102 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2103 MPI_REAL, MPI_COMM_COMP, IERR)
2105 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2111 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
2112 NLOCAL_DOT = NLOCAL_DOT + 1
2113 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
2114 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
2115 N_BUFFER(NLOCAL_DOT) = N
2118 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
2119 ICOUNT,1,MPI_INTEGER, &
2123 IDISPLACEMENT(1) = 0
2125 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2127 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
2128 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2129 MPI_INTEGER, MPI_COMM_COMP, IERR)
2131 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
2132 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2133 MPI_REAL, MPI_COMM_COMP, IERR)
2135 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2137 ! SURF PRESS AT V-POINTS
2138 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
2139 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2140 MPI_REAL, MPI_COMM_COMP, IERR)
2142 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2145 ! DO THE CROSS FIELDS, T AND Q
2148 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
2149 NLOCAL_CRS = NLOCAL_CRS + 1
2150 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
2151 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
2152 PBL_BUFFER(NLOCAL_CRS) = ERRF(5,N) ! KPBL
2153 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
2154 QATOB_BUFFER(NLOCAL_CRS) = ERRF(10,N) ! Model Mixing ratio itself (NOT ERROR)
2155 N_BUFFER(NLOCAL_CRS) = N
2158 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
2159 ICOUNT,1,MPI_INTEGER, &
2161 IDISPLACEMENT(1) = 0
2163 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
2165 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
2166 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2167 MPI_INTEGER, MPI_COMM_COMP, IERR)
2169 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
2170 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2171 MPI_REAL, MPI_COMM_COMP, IERR)
2174 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2177 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
2178 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2179 MPI_REAL, MPI_COMM_COMP, IERR)
2181 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2184 CALL MPI_ALLGATHERV( PBL_BUFFER, NLOCAL_CRS, MPI_REAL, &
2185 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2186 MPI_REAL, MPI_COMM_COMP, IERR)
2188 ERRF(5,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2190 ! SURF PRESS AT MASS POINTS
2191 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
2192 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2193 MPI_REAL, MPI_COMM_COMP, IERR)
2195 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2198 ! Water vapor mixing ratio at the mass points (NOT THE ERROR)
2199 CALL MPI_ALLGATHERV( QATOB_BUFFER, NLOCAL_CRS, MPI_REAL, &
2200 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
2201 MPI_REAL, MPI_COMM_COMP, IERR)
2203 ERRF(10,IFULL_BUFFER(N)) = FULL_BUFFER(N)
2206 DEALLOCATE (IDISPLACEMENT)
2209 END SUBROUTINE get_full_obs_vector
2213 SUBROUTINE wrf_dm_maxtile_real ( val , tile)
2215 REAL val, val_all( ntasks )
2220 ! Collective operation. Each processor calls passing a local value and its index; on return
2221 ! all processors are passed back the maximum of all values passed and its tile number.
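! Usage sketch (hypothetical caller): each task passes its local value and, on
! return, holds the global maximum together with the tile index that owned it.
!
!   my_max = MAXVAL( field(its:ite,jts:jte) )   ! field/its..jte are assumed caller data
!   CALL wrf_dm_maxtile_real ( my_max, tile )
!   ! my_max is now the domain-wide maximum on every task; tile identifies its owner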
2227 CALL wrf_get_dm_communicator ( comm )
2228 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
2232 IF ( val_all(i) .GT. val ) THEN
2238 END SUBROUTINE wrf_dm_maxtile_real
2241 SUBROUTINE wrf_dm_mintile_real ( val , tile)
2243 REAL val, val_all( ntasks )
2248 ! Collective operation. Each processor calls passing a local value and its index; on return
2249 ! all processors are passed back the minimum of all values passed and its tile number.
2255 CALL wrf_get_dm_communicator ( comm )
2256 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
2260 IF ( val_all(i) .LT. val ) THEN
2266 END SUBROUTINE wrf_dm_mintile_real
2269 SUBROUTINE wrf_dm_mintile_double ( val , tile)
2271 DOUBLE PRECISION val, val_all( ntasks )
2276 ! Collective operation. Each processor calls passing a local value and its index; on return
2277 ! all processors are passed back the minimum of all values passed and its tile number.
2283 CALL wrf_get_dm_communicator ( comm )
2284 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
2288 IF ( val_all(i) .LT. val ) THEN
2294 END SUBROUTINE wrf_dm_mintile_double
2297 SUBROUTINE wrf_dm_tile_val_int ( val , tile)
2299 INTEGER val, val_all( ntasks )
2304 ! Collective operation. Get value from input tile.
2310 CALL wrf_get_dm_communicator ( comm )
2311 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
2314 END SUBROUTINE wrf_dm_tile_val_int
2316 SUBROUTINE wrf_get_hostname ( str )
2320 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
2325 END SUBROUTINE wrf_get_hostname
2327 SUBROUTINE wrf_get_hostid ( hostid )
2330 INTEGER i, sz, n, cs
2331 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
2334 END SUBROUTINE wrf_get_hostid
2336 END MODULE module_dm
2339 SUBROUTINE push_communicators_for_domain( id )
2341 INTEGER, INTENT(IN) :: id ! if specified also does an instate for grid id
2342 ! Only required for distributed memory parallel runs
2343 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2344 IF ( communicator_stack_cursor .GE. max_domains ) CALL wrf_error_fatal("push_communicators_for_domain would exceed stack size")
2345 communicator_stack_cursor = communicator_stack_cursor + 1
2347 id_stack(communicator_stack_cursor) = current_id
2348 local_communicator_stack( communicator_stack_cursor ) = local_communicator
2349 local_communicator_periodic_stack( communicator_stack_cursor ) = local_communicator_periodic
2350 local_iocommunicator_stack( communicator_stack_cursor ) = local_iocommunicator
2351 local_communicator_x_stack( communicator_stack_cursor ) = local_communicator_x
2352 local_communicator_y_stack( communicator_stack_cursor ) = local_communicator_y
2353 ntasks_stack( communicator_stack_cursor ) = ntasks
2354 ntasks_y_stack( communicator_stack_cursor ) = ntasks_y
2355 ntasks_x_stack( communicator_stack_cursor ) = ntasks_x
2356 mytask_stack( communicator_stack_cursor ) = mytask
2357 mytask_x_stack( communicator_stack_cursor ) = mytask_x
2358 mytask_y_stack( communicator_stack_cursor ) = mytask_y
2360 CALL instate_communicators_for_domain( id )
2362 END SUBROUTINE push_communicators_for_domain
2363 SUBROUTINE pop_communicators_for_domain
2366 ! Only required for distributed memory parallel runs
2367 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2368 IF ( communicator_stack_cursor .LT. 1 ) CALL wrf_error_fatal("pop_communicators_for_domain on empty stack")
2369 current_id = id_stack(communicator_stack_cursor)
2370 local_communicator = local_communicator_stack( communicator_stack_cursor )
2371 local_communicator_periodic = local_communicator_periodic_stack( communicator_stack_cursor )
2372 local_iocommunicator = local_iocommunicator_stack( communicator_stack_cursor )
2373 local_communicator_x = local_communicator_x_stack( communicator_stack_cursor )
2374 local_communicator_y = local_communicator_y_stack( communicator_stack_cursor )
2375 ntasks = ntasks_stack( communicator_stack_cursor )
2376 ntasks_y = ntasks_y_stack( communicator_stack_cursor )
2377 ntasks_x = ntasks_x_stack( communicator_stack_cursor )
2378 mytask = mytask_stack( communicator_stack_cursor )
2379 mytask_x = mytask_x_stack( communicator_stack_cursor )
2380 mytask_y = mytask_y_stack( communicator_stack_cursor )
2381 communicator_stack_cursor = communicator_stack_cursor - 1
2383 END SUBROUTINE pop_communicators_for_domain
2384 SUBROUTINE instate_communicators_for_domain( id )
2386 ! Only required for distributed memory parallel runs
2387 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2389 INTEGER, INTENT(IN) :: id
2392 local_communicator = local_communicator_store( id )
2393 local_communicator_periodic = local_communicator_periodic_store( id )
2394 local_iocommunicator = local_iocommunicator_store( id )
2395 local_communicator_x = local_communicator_x_store( id )
2396 local_communicator_y = local_communicator_y_store( id )
2397 ntasks = ntasks_store( id )
2398 mytask = mytask_store( id )
2399 ntasks_x = ntasks_x_store( id )
2400 ntasks_y = ntasks_y_store( id )
2401 mytask_x = mytask_x_store( id )
2402 mytask_y = mytask_y_store( id )
2404 END SUBROUTINE instate_communicators_for_domain
2405 SUBROUTINE store_communicators_for_domain( id )
2407 ! Only required for distributed memory parallel runs
2408 #if ( defined( DM_PARALLEL ) && ( ! defined( STUBMPI ) ) )
2410 INTEGER, INTENT(IN) :: id
2411 local_communicator_store( id ) = local_communicator
2412 local_communicator_periodic_store( id ) = local_communicator_periodic
2413 local_iocommunicator_store( id ) = local_iocommunicator
2414 local_communicator_x_store( id ) = local_communicator_x
2415 local_communicator_y_store( id ) = local_communicator_y
2416 ntasks_store( id ) = ntasks
2417 ntasks_x_store( id ) = ntasks_x
2418 ntasks_y_store( id ) = ntasks_y
2419 mytask_store( id ) = mytask
2420 mytask_x_store( id ) = mytask_x
2421 mytask_y_store( id ) = mytask_y
2423 END SUBROUTINE store_communicators_for_domain
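! Usage sketch for the communicator stack above (a hedged example, not code taken
! from elsewhere in WRF): work that must run in the decomposition of a particular
! domain is bracketed by a push, which saves the current state and instates that
! domain's communicators, and a matching pop that restores the previous state.
!
!   CALL push_communicators_for_domain( nest_id )   ! instates nest_id's communicators
!   ! ... halo exchanges / collectives for domain nest_id ...
!   CALL pop_communicators_for_domain               ! restores the caller's communicators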
2425 !=========================================================================
2426 ! wrf_dm_patch_domain has to be outside the module because it is called
2427 ! by a routine in module_domain but depends on module_domain
2429 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
2430 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
2431 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
2432 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
2433 sp1x , ep1x , sm1x , em1x , &
2434 sp2x , ep2x , sm2x , em2x , &
2435 sp3x , ep3x , sm3x , em3x , &
2436 sp1y , ep1y , sm1y , em1y , &
2437 sp2y , ep2y , sm2y , em2y , &
2438 sp3y , ep3y , sm3y , em3y , &
2440 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
2441 USE module_dm, ONLY : patch_domain_rsl_lite !, push_communicators_for_domain, pop_communicators_for_domain
2444 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
2445 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
2446 sm1 , em1 , sm2 , em2 , sm3 , em3
2447 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
2448 sm1x , em1x , sm2x , em2x , sm3x , em3x
2449 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
2450 sm1y , em1y , sm2y , em2y , sm3y , em3y
2451 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
2453 TYPE(domain), POINTER :: parent
2454 TYPE(domain), POINTER :: grid_ptr
2456 ! this is necessary because we cannot pass parent directly into
2457 ! wrf_dm_patch_domain because creating the correct interface definitions
2458 ! would generate a circular USE reference between module_domain and module_dm
2459 ! see comment this date in module_domain for more information. JM 20020416
2462 grid_ptr => head_grid
2463 CALL find_grid_by_id( parent_id , grid_ptr , parent )
2465 CALL push_communicators_for_domain(id)
2467 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
2468 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
2469 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
2470 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
2471 sp1x , ep1x , sm1x , em1x , &
2472 sp2x , ep2x , sm2x , em2x , &
2473 sp3x , ep3x , sm3x , em3x , &
2474 sp1y , ep1y , sm1y , em1y , &
2475 sp2y , ep2y , sm2y , em2y , &
2476 sp3y , ep3y , sm3y , em3y , &
2479 CALL pop_communicators_for_domain
2482 END SUBROUTINE wrf_dm_patch_domain
2484 SUBROUTINE wrf_termio_dup( comm )
2486 INTEGER, INTENT(IN) :: comm
2487 INTEGER mytask, ntasks
2491 CALL mpi_comm_size(comm, ntasks, ierr )
2492 CALL mpi_comm_rank(comm, mytask, ierr )
2493 write(0,*)'starting wrf task ',mytask,' of ',ntasks
2494 CALL rsl_error_dup1( mytask, ntasks )
2499 END SUBROUTINE wrf_termio_dup
2501 SUBROUTINE wrf_get_myproc( myproc )
2502 USE module_dm , ONLY : mytask
2507 END SUBROUTINE wrf_get_myproc
2509 SUBROUTINE wrf_get_nproc( nproc )
2510 USE module_dm , ONLY : ntasks
2515 END SUBROUTINE wrf_get_nproc
2517 SUBROUTINE wrf_get_nprocx( nprocx )
2518 USE module_dm , ONLY : ntasks_x
2523 END SUBROUTINE wrf_get_nprocx
2525 SUBROUTINE wrf_get_nprocy( nprocy )
2526 USE module_dm , ONLY : ntasks_y
2531 END SUBROUTINE wrf_get_nprocy
2533 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
2534 USE module_dm , ONLY : local_communicator
2543 CHARACTER*1 BUF(size)
2546 CALL BYTE_BCAST ( buf , size, local_communicator )
2549 END SUBROUTINE wrf_dm_bcast_bytes
2551 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
2555 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
2560 INTEGER ibuf(256),i,n
2563 ! Root task is required to have the correct value of N1, other tasks
2564 ! might not have the correct value.
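! Usage sketch (hypothetical caller): only the root needs a meaningful length,
! and strings longer than 256 characters are truncated by this routine.
!
!   CHARACTER(LEN=256) :: msg
!   IF ( wrf_dm_on_monitor() ) msg = 'value read on the monitor task'
!   CALL wrf_dm_bcast_string( msg, LEN_TRIM(msg) )   ! msg is now defined on all tasks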
2565 CALL wrf_dm_bcast_integer( n , 1 )
2566 IF (n .GT. 256) n = 256
2569 ibuf(I) = ichar(buf(I:I))
2571 CALL wrf_dm_bcast_integer( ibuf, n )
2574 buf(i:i) = char(ibuf(i))
2579 END SUBROUTINE wrf_dm_bcast_string
2581 SUBROUTINE wrf_dm_bcast_string_comm( BUF, N1, COMM )
2586 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
2591 INTEGER ibuf(256),i,n
2594 ! Root task is required to have the correct value of N1, other tasks
2595 ! might not have the correct value.
2596 CALL BYTE_BCAST( n, IWORDSIZE, COMM )
2597 IF (n .GT. 256) n = 256
2600 ibuf(I) = ichar(buf(I:I))
2602 CALL BYTE_BCAST( ibuf, N*IWORDSIZE, COMM )
2605 buf(i:i) = char(ibuf(i))
2610 END SUBROUTINE wrf_dm_bcast_string_comm
2612 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
2616 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
2618 END SUBROUTINE wrf_dm_bcast_integer
2620 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
2623 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
2624 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
2625 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
2626 ! since we are not indexing the globbuf and Field arrays, it does not matter.
2628 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
2630 END SUBROUTINE wrf_dm_bcast_double
2632 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
2636 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
2638 END SUBROUTINE wrf_dm_bcast_real
2640 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
2644 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
2646 END SUBROUTINE wrf_dm_bcast_logical
2648 SUBROUTINE write_68( grid, v , s , &
2649 ids, ide, jds, jde, kds, kde, &
2650 ims, ime, jms, jme, kms, kme, &
2651 its, ite, jts, jte, kts, kte )
2652 USE module_domain, ONLY : domain
2654 TYPE(domain) , INTENT (INOUT) :: grid
2656 INTEGER ids, ide, jds, jde, kds, kde, &
2657 ims, ime, jms, jme, kms, kme, &
2658 its, ite, jts, jte, kts, kte
2659 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
2663 logical, external :: wrf_dm_on_monitor
2664 real globbuf( ids:ide, kds:kde, jds:jde )
2665 character*3 ord, stag
2667 if ( kds == kde ) then
2670 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
2671 ids, ide, jds, jde, kds, kde, &
2672 ims, ime, jms, jme, kms, kme, &
2673 its, ite, jts, jte, kts, kte )
2678 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
2679 ids, ide, kds, kde, jds, jde, &
2680 ims, ime, kms, kme, jms, jme, &
2681 its, ite, kts, kte, jts, jte )
2685 if ( wrf_dm_on_monitor() ) THEN
2686 WRITE(68,*) ide-ids+1, jde-jds+1 , s
2689 WRITE(68,*) globbuf(i,1,j)
2697 SUBROUTINE wrf_abort
2699 #if ( DA_CORE != 1 )
2700 USE module_cpl, ONLY : coupler_on, cpl_abort
2707 #if ( DA_CORE != 1 )
2708 IF ( coupler_on ) THEN
2709 CALL cpl_abort( 'wrf_abort', 'look for abort message in rsl* files' )
2712 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
2713 #if ( DA_CORE != 1 )
2719 END SUBROUTINE wrf_abort
2721 SUBROUTINE wrf_dm_shutdown
2725 CALL MPI_FINALIZE( ierr )
2728 END SUBROUTINE wrf_dm_shutdown
2730 LOGICAL FUNCTION wrf_dm_on_monitor()
2734 INTEGER tsk, ierr, mpi_comm_local
2735 CALL wrf_get_dm_communicator( mpi_comm_local )
2736 IF ( mpi_comm_local .NE. MPI_UNDEFINED ) THEN
2737 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
2738 wrf_dm_on_monitor = tsk .EQ. 0
2740 wrf_dm_on_monitor = .FALSE.
2743 wrf_dm_on_monitor = .TRUE.
2746 END FUNCTION wrf_dm_on_monitor
2748 SUBROUTINE rsl_comm_iter_init(shw,ps,pe)
2750 INTEGER iter, plus_send_start, plus_recv_start, &
2751 minus_send_start, minus_recv_start
2752 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
2753 minus_send_start, minus_recv_start
2755 minus_send_start = ps
2756 minus_recv_start = ps-1
2757 plus_send_start = pe
2758 plus_recv_start = pe+1
2759 END SUBROUTINE rsl_comm_iter_init
2761 LOGICAL FUNCTION rsl_comm_iter ( id , is_intermediate, &
2762 shw , xy , ds, de_in, ps, pe, nds,nde, &
2763 sendbeg_m, sendw_m, sendbeg_p, sendw_p, &
2764 recvbeg_m, recvw_m, recvbeg_p, recvw_p )
2765 USE module_dm, ONLY : ntasks_x, ntasks_y, mytask_x, mytask_y, minx, miny, &
2766 nest_pes_x, nest_pes_y
2768 INTEGER, INTENT(IN) :: id,shw,xy,ds,de_in,ps,pe,nds,nde
2769 LOGICAL, INTENT(IN) :: is_intermediate ! treated differently, coarse but with same decomp as nest
2770 INTEGER, INTENT(OUT) :: sendbeg_m, sendw_m, sendbeg_p, sendw_p
2771 INTEGER, INTENT(OUT) :: recvbeg_m, recvw_m, recvbeg_p, recvw_p
2772 INTEGER k, kn, ni, nj, de, Px, Py, nt, ntx, nty, me, lb, ub, ierr
2775 INTEGER iter, plus_send_start, plus_recv_start, &
2776 minus_send_start, minus_recv_start
2777 INTEGER parent_grid_ratio, parent_start
2778 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
2779 minus_send_start, minus_recv_start
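! Descriptive note: each call handles one iteration of the halo exchange in the
! minus and plus directions along the chosen axis (xy = 1 for X/I, otherwise Y/J).
! It walks outward from the patch edges, using task_for_point to find which
! neighboring task owns each halo row/column, and returns the start index and
! width of the data to send and receive on each side for this iteration.  The
! cursors in COMMON /rcii/ carry the walk state between successive calls, and
! the LOGICAL result ('went') reports whether this iteration found any halo
! work, presumably so callers can loop until it returns .FALSE.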
2782 ntx = nest_pes_x(id)
2783 nty = nest_pes_y(id)
2784 IF ( xy .EQ. 1 ) THEN ! X/I axis
2787 dum = 2 * nty ! dummy dimension length for tfp to decompose without getting div 0
2788 IF ( is_intermediate ) THEN
2789 CALL nl_get_i_parent_start(id,parent_start)
2790 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2795 dum = 2 * ntx ! dummy dimension length for tfp to decompose without getting div 0
2796 IF ( is_intermediate ) THEN
2797 CALL nl_get_j_parent_start(id,parent_start)
2798 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2808 IF ( me .GT. 0 ) THEN
2809 lb = minus_send_start
2813 IF ( xy .eq. 1 ) THEN
2814 IF ( is_intermediate ) THEN
2815 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2816 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2817 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (h)')
2819 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2820 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (i)')
2822 IF ( Px .NE. me+(iter-1) ) THEN
2826 IF ( is_intermediate ) THEN
2827 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2828 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2829 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (h)')
2831 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2832 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (i)')
2834 IF ( Py .NE. me+(iter-1) ) THEN
2838 minus_send_start = minus_send_start+1
2839 sendw_m = sendw_m + 1
2845 IF ( me .GT. 0 ) THEN
2846 ub = minus_recv_start
2848 DO k = minus_recv_start,ps-shw,-1
2850 IF ( xy .eq. 1 ) THEN
2851 IF ( is_intermediate ) THEN
2852 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2853 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2854 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (j)')
2856 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2857 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (k)')
2859 IF ( Px .NE. me-iter ) THEN
2863 IF ( is_intermediate ) THEN
2864 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2865 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2866 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (j)')
2868 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2869 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (k)')
2871 IF ( Py .NE. me-iter ) THEN
2875 minus_recv_start = minus_recv_start-1
2876 recvw_m = recvw_m + 1
2883 IF ( ( xy .eq. 1 .and. me .LT. ntx-1 ) .OR. ( xy .eq. 0 .and. me .LT. nty-1 ) ) THEN
2884 ub = plus_send_start
2885 sendbeg_p = pe - ub + 1
2886 DO k = ub,pe-shw+1,-1
2888 IF ( xy .eq. 1 ) THEN
2889 IF ( is_intermediate ) THEN
2890 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2891 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2892 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (l)')
2894 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2895 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (m)')
2897 IF ( Px .NE. me-(iter-1) ) THEN
2901 IF ( is_intermediate ) THEN
2902 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2903 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2904 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (l)')
2906 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2907 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (m)')
2909 IF ( Py .NE. me-(iter-1) ) THEN
2913 plus_send_start = plus_send_start - 1
2914 sendw_p = sendw_p + 1
2920 IF ( ( xy .eq. 1 .and. me .LT. ntx-1 ) .OR. ( xy .eq. 0 .and. me .LT. nty-1 ) ) THEN
2921 lb = plus_recv_start
2925 IF ( xy .eq. 1 ) THEN
2926 IF ( is_intermediate ) THEN
2927 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2928 CALL task_for_point (kn,1,nds,nde,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2929 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (n)')
2932 CALL task_for_point (k,1,ds,de,1,dum,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2934 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (o)')
2936 IF ( Px .NE. me+iter ) THEN
2940 IF ( is_intermediate ) THEN
2941 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2942 CALL task_for_point (1,kn,1,dum,nds,nde,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2943 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (n)')
2945 CALL task_for_point (1,k,1,dum,ds,de,ntx,nty,Px,Py,minx,miny,ierr) ! modified to treat x and y separately
2946 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('error code returned by task_for_point in module_dm.F (o)')
2948 IF ( Py .NE. me+iter ) THEN
2952 plus_recv_start = plus_recv_start + 1
2953 recvw_p = recvw_p + 1
2957 if ( iter .eq. 1 ) then
2962 sendw_m = 0 ; sendw_p = 0 ; recvw_m = 0 ; recvw_p = 0
2963 sendbeg_m = 1 ; if ( me .GT. 0 ) sendw_m = shw ;
2964 sendbeg_p = 1 ; if ( me .LT. nt-1 ) sendw_p = shw
2965 recvbeg_m = 1 ; if ( me .GT. 0 ) recvw_m = shw ;
2966 recvbeg_p = 1 ; if ( me .LT. nt-1 ) recvw_p = shw ;
2968 ! write(0,*)'shw ', shw , ' xy ',xy
2969 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2970 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2971 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2974 ! write(0,*)'shw ', shw , ' xy ',xy,' plus_send_start ',plus_send_start,' minus_send_start ', minus_send_start
2975 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2976 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2977 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2979 rsl_comm_iter = went
2980 END FUNCTION rsl_comm_iter
2982 INTEGER FUNCTION wrf_dm_monitor_rank()
2984 wrf_dm_monitor_rank = 0
2986 END FUNCTION wrf_dm_monitor_rank
2988 ! return the global communicator if id <= 0
2989 SUBROUTINE wrf_get_dm_communicator_for_id ( id, communicator )
2990 USE module_dm , ONLY : local_communicator_store, mpi_comm_allcompute
2992 INTEGER , INTENT(IN) :: id
2993 INTEGER , INTENT(OUT) :: communicator
2994 IF ( id .le. 0 ) THEN
2995 communicator = mpi_comm_allcompute
2997 communicator = local_communicator_store(id)
3000 END SUBROUTINE wrf_get_dm_communicator_for_id
3002 SUBROUTINE wrf_get_dm_communicator ( communicator )
3003 USE module_dm , ONLY : local_communicator
3005 INTEGER , INTENT(OUT) :: communicator
3006 communicator = local_communicator
3008 END SUBROUTINE wrf_get_dm_communicator
3010 SUBROUTINE wrf_get_dm_communicator_x ( communicator )
3011 USE module_dm , ONLY : local_communicator_x
3013 INTEGER , INTENT(OUT) :: communicator
3014 communicator = local_communicator_x
3016 END SUBROUTINE wrf_get_dm_communicator_x
3018 SUBROUTINE wrf_get_dm_communicator_y ( communicator )
3019 USE module_dm , ONLY : local_communicator_y
3021 INTEGER , INTENT(OUT) :: communicator
3022 communicator = local_communicator_y
3024 END SUBROUTINE wrf_get_dm_communicator_y
3026 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
3027 USE module_dm , ONLY : local_iocommunicator
3029 INTEGER , INTENT(OUT) :: iocommunicator
3030 iocommunicator = local_iocommunicator
3032 END SUBROUTINE wrf_get_dm_iocommunicator
3034 SUBROUTINE wrf_set_dm_communicator ( communicator )
3035 USE module_dm , ONLY : local_communicator
3037 INTEGER , INTENT(IN) :: communicator
3038 local_communicator = communicator
3040 END SUBROUTINE wrf_set_dm_communicator
3042 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
3043 USE module_dm , ONLY : local_iocommunicator
3045 INTEGER , INTENT(IN) :: iocommunicator
3046 local_iocommunicator = iocommunicator
3048 END SUBROUTINE wrf_set_dm_iocommunicator
3050 SUBROUTINE wrf_get_dm_ntasks_x ( retval )
3051 USE module_dm , ONLY : ntasks_x
3053 INTEGER , INTENT(OUT) :: retval
3056 END SUBROUTINE wrf_get_dm_ntasks_x
3058 SUBROUTINE wrf_get_dm_ntasks_y ( retval )
3059 USE module_dm , ONLY : ntasks_y
3061 INTEGER , INTENT(OUT) :: retval
3064 END SUBROUTINE wrf_get_dm_ntasks_y
3067 SUBROUTINE wrf_set_dm_quilt_comm ( communicator )
3068 USE module_dm , ONLY : local_quilt_comm
3070 INTEGER , INTENT(IN) :: communicator
3071 local_quilt_comm = communicator
3073 END SUBROUTINE wrf_set_dm_quilt_comm
3075 SUBROUTINE wrf_get_dm_quilt_comm ( communicator )
3076 USE module_dm , ONLY : local_quilt_comm
3078 INTEGER , INTENT(OUT) :: communicator
3079 communicator = local_quilt_comm
3081 END SUBROUTINE wrf_get_dm_quilt_comm
3084 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
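! Usage sketch (hypothetical caller, patterned after write_68 above): collect a
! patch-decomposed 3-D real field onto the monitor task.  globbuf only needs to
! be full-domain sized on the monitor; the names and the 'z' / 'xzy' stagger and
! memory-ordering strings below are example values assumed for illustration.
!
!   REAL :: v      ( ims:ime, kms:kme, jms:jme )   ! patch/memory-sized field
!   REAL :: globbuf( ids:ide, kds:kde, jds:jde )   ! full-domain buffer (monitor)
!   CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, 'z', 'xzy', &
!                                   ids, ide, kds, kde, jds, jde,         &
!                                   ims, ime, kms, kme, jms, jme,         &
!                                   ips, ipe, kps, kpe, jps, jpe )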
3086 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
3087 DS1,DE1,DS2,DE2,DS3,DE3,&
3088 MS1,ME1,MS2,ME2,MS3,ME3,&
3089 PS1,PE1,PS2,PE2,PS3,PE3 )
3091 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3092 MS1,ME1,MS2,ME2,MS3,ME3,&
3093 PS1,PE1,PS2,PE2,PS3,PE3
3094 CHARACTER *(*) stagger,ordering
3099 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
3100 DS1,DE1,DS2,DE2,DS3,DE3,&
3101 MS1,ME1,MS2,ME2,MS3,ME3,&
3102 PS1,PE1,PS2,PE2,PS3,PE3 )
3105 END SUBROUTINE wrf_patch_to_global_real
3107 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
3108 DS1,DE1,DS2,DE2,DS3,DE3,&
3109 MS1,ME1,MS2,ME2,MS3,ME3,&
3110 PS1,PE1,PS2,PE2,PS3,PE3 )
3112 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3113 MS1,ME1,MS2,ME2,MS3,ME3,&
3114 PS1,PE1,PS2,PE2,PS3,PE3
3115 CHARACTER *(*) stagger,ordering
3117 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3118 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3119 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
3120 ! since we are not indexing the globbuf and Field arrays, it does not matter.
3124 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
3125 DS1,DE1,DS2,DE2,DS3,DE3,&
3126 MS1,ME1,MS2,ME2,MS3,ME3,&
3127 PS1,PE1,PS2,PE2,PS3,PE3 )
3130 END SUBROUTINE wrf_patch_to_global_double
3133 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
3134 DS1,DE1,DS2,DE2,DS3,DE3,&
3135 MS1,ME1,MS2,ME2,MS3,ME3,&
3136 PS1,PE1,PS2,PE2,PS3,PE3 )
3138 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3139 MS1,ME1,MS2,ME2,MS3,ME3,&
3140 PS1,PE1,PS2,PE2,PS3,PE3
3141 CHARACTER *(*) stagger,ordering
3146 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
3147 DS1,DE1,DS2,DE2,DS3,DE3,&
3148 MS1,ME1,MS2,ME2,MS3,ME3,&
3149 PS1,PE1,PS2,PE2,PS3,PE3 )
3152 END SUBROUTINE wrf_patch_to_global_integer
3155 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
3156 DS1,DE1,DS2,DE2,DS3,DE3,&
3157 MS1,ME1,MS2,ME2,MS3,ME3,&
3158 PS1,PE1,PS2,PE2,PS3,PE3 )
3160 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3161 MS1,ME1,MS2,ME2,MS3,ME3,&
3162 PS1,PE1,PS2,PE2,PS3,PE3
3163 CHARACTER *(*) stagger,ordering
3168 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
3169 DS1,DE1,DS2,DE2,DS3,DE3,&
3170 MS1,ME1,MS2,ME2,MS3,ME3,&
3171 PS1,PE1,PS2,PE2,PS3,PE3 )
3174 END SUBROUTINE wrf_patch_to_global_logical
3177 # define FRSTELEM (1)
3182 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
3183 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3184 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3185 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
3186 USE module_driver_constants
3188 USE module_wrf_error, ONLY : wrf_at_debug_level
3189 USE module_dm, ONLY : local_communicator, ntasks
3192 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3193 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3194 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
3195 CHARACTER *(*) stagger,ordering
3196 INTEGER domdesc,typesize,ierr
3200 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3201 MS1,ME1,MS2,ME2,MS3,ME3,&
3202 PS1,PE1,PS2,PE2,PS3,PE3
3203 INTEGER ids,ide,jds,jde,kds,kde,&
3204 ims,ime,jms,jme,kms,kme,&
3205 ips,ipe,jps,jpe,kps,kpe
3206 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
3208 INTEGER i, j, k, ndim
3209 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
3210 ! allocated further down, after the D indices are potentially recalculated for staggering
3211 REAL, ALLOCATABLE :: tmpbuf( : )
3212 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
3214 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
3215 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
3216 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
3218 SELECT CASE ( TRIM(ordering) )
3222 ndim = 3 ! where appropriate
3225 SELECT CASE ( TRIM(ordering) )
3227 ! the non-staggered variables come in at one-less than
3228 ! domain dimensions, but code wants full domain spec, so
3229 ! adjust if not staggered
3230 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3231 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
3232 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3234 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3235 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
3236 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3238 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3239 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3240 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
3242 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3243 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3244 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
3248 ! moved to here to be after the potential recalculations of D dims
3249 IF ( wrf_dm_on_monitor() ) THEN
3250 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
3252 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
3254 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
3256 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
3257 Patch(2,1) = ps2 ; Patch(2,2) = pe2
3258 Patch(3,1) = ps3 ; Patch(3,2) = pe3
3260 IF ( typesize .EQ. RWORDSIZE ) THEN
3261 CALL just_patch_r ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3262 PS1, PE1, PS2, PE2, PS3, PE3 , &
3263 MS1, ME1, MS2, ME2, MS3, ME3 )
3264 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3265 CALL just_patch_i ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3266 PS1, PE1, PS2, PE2, PS3, PE3 , &
3267 MS1, ME1, MS2, ME2, MS3, ME3 )
3268 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3269 CALL just_patch_d ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3270 PS1, PE1, PS2, PE2, PS3, PE3 , &
3271 MS1, ME1, MS2, ME2, MS3, ME3 )
3272 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3273 CALL just_patch_l ( buf , locbuf , size(locbuf)*RWORDSIZE/typesize, &
3274 PS1, PE1, PS2, PE2, PS3, PE3 , &
3275 MS1, ME1, MS2, ME2, MS3, ME3 )
3278 ! defined in external/io_quilt
3279 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
3283 CALL collect_on_comm0 ( local_communicator , typesize , &
3284 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
3285 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
3287 ndim = len(TRIM(ordering))
3289 IF ( wrf_at_debug_level(500) ) THEN
3293 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
3295 IF ( typesize .EQ. RWORDSIZE ) THEN
3296 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
3297 DS1, DE1, DS2, DE2, DS3, DE3 , &
3299 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3300 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
3301 DS1, DE1, DS2, DE2, DS3, DE3 , &
3303 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3304 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
3305 DS1, DE1, DS2, DE2, DS3, DE3 , &
3307 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3308 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
3309 DS1, DE1, DS2, DE2, DS3, DE3 , &
3315 IF ( wrf_at_debug_level(500) ) THEN
3316 CALL end_timing('wrf_patch_to_global_generic')
3318 DEALLOCATE( tmpbuf )
3321 END SUBROUTINE wrf_patch_to_global_generic
3323 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
3324 PS1,PE1,PS2,PE2,PS3,PE3, &
3325 MS1,ME1,MS2,ME2,MS3,ME3 )
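! Descriptive note: packs the patch region (PS1:PE1, PS2:PE2, PS3:PE3) of the
! memory-dimensioned input array into the contiguous 1-D output buffer, for the
! collect_on_comm0 gather performed by the caller.  The just_patch_r/d/l
! routines below do the same for the other data types.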
3327 INTEGER , INTENT(IN) :: noutbuf
3328 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3329 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3330 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3331 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
3333 INTEGER :: i,j,k,n , icurs
3338 outbuf( icurs ) = inbuf( i, j, k )
3344 END SUBROUTINE just_patch_i
3346 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
3347 PS1,PE1,PS2,PE2,PS3,PE3, &
3348 MS1,ME1,MS2,ME2,MS3,ME3 )
3350 INTEGER , INTENT(IN) :: noutbuf
3351 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3352 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3353 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3354 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3356 INTEGER :: i,j,k , icurs
3361 outbuf( icurs ) = inbuf( i, j, k )
3367 END SUBROUTINE just_patch_r
3369 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
3370 PS1,PE1,PS2,PE2,PS3,PE3, &
3371 MS1,ME1,MS2,ME2,MS3,ME3 )
3373 INTEGER , INTENT(IN) :: noutbuf
3374 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3375 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3376 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3377 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3379 INTEGER :: i,j,k,n , icurs
3384 outbuf( icurs ) = inbuf( i, j, k )
3390 END SUBROUTINE just_patch_d
3392 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
3393 PS1,PE1,PS2,PE2,PS3,PE3, &
3394 MS1,ME1,MS2,ME2,MS3,ME3 )
3396 INTEGER , INTENT(IN) :: noutbuf
3397 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
3398 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3399 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3400 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
3402 INTEGER :: i,j,k,n , icurs
3407 outbuf( icurs ) = inbuf( i, j, k )
3413 END SUBROUTINE just_patch_l
3416 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
3417 DS1,DE1,DS2,DE2,DS3,DE3, &
3419 USE module_dm, ONLY : ntasks
3421 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
3422 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3423 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3425 INTEGER :: i,j,k,n , icurs
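! Descriptive note: unpacks the buffer gathered on the monitor task into the
! full-domain output array; for each task described by GPATCH, the corresponding
! (i,j,k) slab of outbuf is filled from the next contiguous run of inbuf.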
3428 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3429 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3430 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3431 outbuf( i, j, k ) = inbuf( icurs )
3439 END SUBROUTINE patch_2_outbuf_r
3441 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
3442 DS1,DE1,DS2,DE2,DS3,DE3,&
3444 USE module_dm, ONLY : ntasks
3446 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
3447 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3448 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3450 INTEGER :: i,j,k,n , icurs
3453 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3454 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3455 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3456 outbuf( i, j, k ) = inbuf( icurs )
3463 END SUBROUTINE patch_2_outbuf_i
3465 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
3466 DS1,DE1,DS2,DE2,DS3,DE3,&
3468 USE module_dm, ONLY : ntasks
3470 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
3471 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3472 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3474 INTEGER :: i,j,k,n , icurs
3477 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3478 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3479 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3480 outbuf( i, j, k ) = inbuf( icurs )
3487 END SUBROUTINE patch_2_outbuf_d
3489 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
3490 DS1,DE1,DS2,DE2,DS3,DE3,&
3492 USE module_dm, ONLY : ntasks
3494 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
3495 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3496 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
3498 INTEGER :: i,j,k,n , icurs
3501 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3502 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3503 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3504 outbuf( i, j, k ) = inbuf( icurs )
3511 END SUBROUTINE patch_2_outbuf_l
3513 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
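! Usage sketch (hypothetical caller, the mirror image of the patch-to-global
! example above): scatter a full-domain field held on the monitor task back to
! the memory-dimensioned patch arrays on every task; the stagger and ordering
! strings and variable names are again example assumptions.
!
!   CALL wrf_global_to_patch_real ( globbuf, v, grid%domdesc, 'z', 'xzy', &
!                                   ids, ide, kds, kde, jds, jde,         &
!                                   ims, ime, kms, kme, jms, jme,         &
!                                   ips, ipe, kps, kpe, jps, jpe )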
3515 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
3516 DS1,DE1,DS2,DE2,DS3,DE3,&
3517 MS1,ME1,MS2,ME2,MS3,ME3,&
3518 PS1,PE1,PS2,PE2,PS3,PE3 )
3520 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3521 MS1,ME1,MS2,ME2,MS3,ME3,&
3522 PS1,PE1,PS2,PE2,PS3,PE3
3523 CHARACTER *(*) stagger,ordering
3528 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
3529 DS1,DE1,DS2,DE2,DS3,DE3,&
3530 MS1,ME1,MS2,ME2,MS3,ME3,&
3531 PS1,PE1,PS2,PE2,PS3,PE3 )
3533 END SUBROUTINE wrf_global_to_patch_real
3535 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
3536 DS1,DE1,DS2,DE2,DS3,DE3,&
3537 MS1,ME1,MS2,ME2,MS3,ME3,&
3538 PS1,PE1,PS2,PE2,PS3,PE3 )
3540 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3541 MS1,ME1,MS2,ME2,MS3,ME3,&
3542 PS1,PE1,PS2,PE2,PS3,PE3
3543 CHARACTER *(*) stagger,ordering
3545 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3546 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3547 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
3548 ! since we are not indexing the globbuf and Field arrays, it does not matter.
3552 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
3553 DS1,DE1,DS2,DE2,DS3,DE3,&
3554 MS1,ME1,MS2,ME2,MS3,ME3,&
3555 PS1,PE1,PS2,PE2,PS3,PE3 )
3557 END SUBROUTINE wrf_global_to_patch_double
3560 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
3561 DS1,DE1,DS2,DE2,DS3,DE3,&
3562 MS1,ME1,MS2,ME2,MS3,ME3,&
3563 PS1,PE1,PS2,PE2,PS3,PE3 )
3565 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3566 MS1,ME1,MS2,ME2,MS3,ME3,&
3567 PS1,PE1,PS2,PE2,PS3,PE3
3568 CHARACTER *(*) stagger,ordering
3573 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
3574 DS1,DE1,DS2,DE2,DS3,DE3,&
3575 MS1,ME1,MS2,ME2,MS3,ME3,&
3576 PS1,PE1,PS2,PE2,PS3,PE3 )
3578 END SUBROUTINE wrf_global_to_patch_integer
3580 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
3581 DS1,DE1,DS2,DE2,DS3,DE3,&
3582 MS1,ME1,MS2,ME2,MS3,ME3,&
3583 PS1,PE1,PS2,PE2,PS3,PE3 )
3585 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3586 MS1,ME1,MS2,ME2,MS3,ME3,&
3587 PS1,PE1,PS2,PE2,PS3,PE3
3588 CHARACTER *(*) stagger,ordering
3593 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
3594 DS1,DE1,DS2,DE2,DS3,DE3,&
3595 MS1,ME1,MS2,ME2,MS3,ME3,&
3596 PS1,PE1,PS2,PE2,PS3,PE3 )
3598 END SUBROUTINE wrf_global_to_patch_logical
3600 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
3601 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3602 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3603 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
3604 USE module_dm, ONLY : local_communicator, ntasks
3605 USE module_driver_constants
3607 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
3608 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
3609 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
3610 CHARACTER *(*) stagger,ordering
3611 INTEGER domdesc,typesize,ierr
3615 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
3616 MS1,ME1,MS2,ME2,MS3,ME3,&
3617 PS1,PE1,PS2,PE2,PS3,PE3
3618 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
3620 INTEGER i,j,k,ord,ord2d,ndim
3621 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
3622 REAL, ALLOCATABLE :: tmpbuf( : )
3623 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
3625 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
3626 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
3627 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
3629 SELECT CASE ( TRIM(ordering) )
3633 ndim = 3 ! where appropriate
3636 SELECT CASE ( TRIM(ordering) )
3638 ! the non-staggered variables come in at one-less than
3639 ! domain dimensions, but code wants full domain spec, so
3640 ! adjust if not staggered
3641 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3642 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
3643 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3645 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3646 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
3647 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
3649 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
3650 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3651 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
3653 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
3654 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
3655 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
3659 ! moved to here to be after the potential recalculations of D dims
3660 IF ( wrf_dm_on_monitor() ) THEN
3661 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
3663 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
3665 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
3667 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
3668 Patch(2,1) = ps2 ; Patch(2,2) = pe2
3669 Patch(3,1) = ps3 ; Patch(3,2) = pe3
3671 ! defined in external/io_quilt
3672 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
3675 ndim = len(TRIM(ordering))
3677 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
3678 IF ( typesize .EQ. RWORDSIZE ) THEN
3679 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
3680 DS1, DE1, DS2, DE2, DS3, DE3 , &
3681 MS1, ME1, MS2, ME2, MS3, ME3 , &
3683 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3684 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
3685 DS1, DE1, DS2, DE2, DS3, DE3 , &
3687 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3688 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
3689 DS1, DE1, DS2, DE2, DS3, DE3 , &
3691 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3692 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
3693 DS1, DE1, DS2, DE2, DS3, DE3 , &
3698 CALL dist_on_comm0 ( local_communicator , typesize , &
3699 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
3700 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
3702 IF ( typesize .EQ. RWORDSIZE ) THEN
3703 CALL all_sub_r ( locbuf , buf , &
3704 PS1, PE1, PS2, PE2, PS3, PE3 , &
3705 MS1, ME1, MS2, ME2, MS3, ME3 )
3707 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
3708 CALL all_sub_i ( locbuf , buf , &
3709 PS1, PE1, PS2, PE2, PS3, PE3 , &
3710 MS1, ME1, MS2, ME2, MS3, ME3 )
3711 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
3712 CALL all_sub_d ( locbuf , buf , &
3713 PS1, PE1, PS2, PE2, PS3, PE3 , &
3714 MS1, ME1, MS2, ME2, MS3, ME3 )
3715 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
3716 CALL all_sub_l ( locbuf , buf , &
3717 PS1, PE1, PS2, PE2, PS3, PE3 , &
3718 MS1, ME1, MS2, ME2, MS3, ME3 )
3722 DEALLOCATE ( tmpbuf )
3725 END SUBROUTINE wrf_global_to_patch_generic
3727 SUBROUTINE all_sub_i ( inbuf , outbuf, &
3728 PS1,PE1,PS2,PE2,PS3,PE3, &
3729 MS1,ME1,MS2,ME2,MS3,ME3 )
3731 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
3732 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3733 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3734 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3736 INTEGER :: i,j,k,n , icurs
3741 outbuf( i, j, k ) = inbuf ( icurs )
3747 END SUBROUTINE all_sub_i
3749 SUBROUTINE all_sub_r ( inbuf , outbuf, &
3750 PS1,PE1,PS2,PE2,PS3,PE3, &
3751 MS1,ME1,MS2,ME2,MS3,ME3 )
3753 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
3754 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3755 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3756 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3758 INTEGER :: i,j,k,n , icurs
3763 outbuf( i, j, k ) = inbuf ( icurs )
3770 END SUBROUTINE all_sub_r
3772 SUBROUTINE all_sub_d ( inbuf , outbuf, &
3773 PS1,PE1,PS2,PE2,PS3,PE3, &
3774 MS1,ME1,MS2,ME2,MS3,ME3 )
3776 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
3777 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3778 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3779 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3781 INTEGER :: i,j,k,n , icurs
3786 outbuf( i, j, k ) = inbuf ( icurs )
3792 END SUBROUTINE all_sub_d
3794 SUBROUTINE all_sub_l ( inbuf , outbuf, &
3795 PS1,PE1,PS2,PE2,PS3,PE3, &
3796 MS1,ME1,MS2,ME2,MS3,ME3 )
3798 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
3799 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3800 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
3801 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
3803 INTEGER :: i,j,k,n , icurs
3808 outbuf( i, j, k ) = inbuf ( icurs )
3814 END SUBROUTINE all_sub_l
3816 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
3817 DS1,DE1,DS2,DE2,DS3,DE3, &
3818 MS1, ME1, MS2, ME2, MS3, ME3 , &
3820 USE module_dm, ONLY : ntasks
3822 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3823 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3824 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
3825 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3827 INTEGER :: i,j,k,n , icurs
3831 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3832 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3833 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3834 outbuf( icurs ) = inbuf( i,j,k )
3841 END SUBROUTINE outbuf_2_patch_r
3843 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
3844 DS1,DE1,DS2,DE2,DS3,DE3,&
3846 USE module_dm, ONLY : ntasks
3848 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
3849 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3850 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3852 INTEGER :: i,j,k,n , icurs
3855 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3856 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3857 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3858 outbuf( icurs ) = inbuf( i,j,k )
3865 END SUBROUTINE outbuf_2_patch_i
3867 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
3868 DS1,DE1,DS2,DE2,DS3,DE3,&
3870 USE module_dm, ONLY : ntasks
3872 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
3873 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3874 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3876 INTEGER :: i,j,k,n , icurs
3879 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3880 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3881 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3882 outbuf( icurs ) = inbuf( i,j,k )
3889 END SUBROUTINE outbuf_2_patch_d
3891 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
3892 DS1,DE1,DS2,DE2,DS3,DE3,&
3894 USE module_dm, ONLY : ntasks
3896 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3897 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3898 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3900 INTEGER :: i,j,k,n , icurs
3903 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3904 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3905 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3906 outbuf( icurs ) = inbuf( i,j,k )
3913 END SUBROUTINE outbuf_2_patch_l
3916 SUBROUTINE wrf_dm_nestexchange_init
3917 CALL rsl_lite_nesting_reset
3918 END SUBROUTINE wrf_dm_nestexchange_init
3921 !------------------------------------------------------------------
3923 #if ( EM_CORE == 1 && DA_CORE != 1 )
3925 !------------------------------------------------------------------
3927 SUBROUTINE force_domain_em_part2 ( grid, ngrid, pgrid, config_flags &
3929 #include "dummy_new_args.inc"
3932 USE module_state_description
3933 USE module_domain, ONLY : domain, get_ijk_from_grid
3934 USE module_configure, ONLY : grid_config_rec_type
3935 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, local_communicator, mytask, &
3936 nest_pes_x, nest_pes_y ! , &
3937 !push_communicators_for_domain,pop_communicators_for_domain
3938 USE module_comm_nesting_dm, ONLY : halo_force_down_sub
3939 USE module_model_constants
3942 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3943 TYPE(domain), POINTER :: ngrid
3944 TYPE(domain), POINTER :: pgrid !KAL added for vertical nesting
3945 #include "dummy_new_decl.inc"
3947 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k,kk
3948 TYPE (grid_config_rec_type) :: config_flags
3950 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3951 cims, cime, cjms, cjme, ckms, ckme, &
3952 cips, cipe, cjps, cjpe, ckps, ckpe
3953 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3954 nims, nime, njms, njme, nkms, nkme, &
3955 nips, nipe, njps, njpe, nkps, nkpe
3956 INTEGER :: ids, ide, jds, jde, kds, kde, &
3957 ims, ime, jms, jme, kms, kme, &
3958 ips, ipe, jps, jpe, kps, kpe
3959 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7,itrace
3960 REAL dummy_xs, dummy_xe, dummy_ys, dummy_ye
3962 !KAL variables for vertical nesting
3963 REAL :: p_top_m , p_surf_m , mu_m , hsca_m , pre_c ,pre_n
3964 REAL, DIMENSION(pgrid%s_vert:pgrid%e_vert) :: alt_w_c
3965 REAL, DIMENSION(pgrid%s_vert:pgrid%e_vert+1) :: alt_u_c
3966 REAL, DIMENSION(ngrid%s_vert:ngrid%e_vert) :: alt_w_n
3967 REAL, DIMENSION(ngrid%s_vert:ngrid%e_vert+1) :: alt_u_n
3969 REAL, DIMENSION(:,:,:), ALLOCATABLE :: p, al
3970 REAL :: pfu, pfd, phm, temp, qvf, qvf1, qvf2
3972 !KAL change this for vertical nesting
3973 ! force_domain_em_part1 packs up the interpolation onto the coarse (vertical) grid;
3974 ! therefore the message size is based on the coarse grid's number of levels.
3975 ! Here it is unpacked onto the intermediate grid.
3976 CALL get_ijk_from_grid ( pgrid , &
3977 cids, cide, cjds, cjde, ckds, ckde, &
3978 cims, cime, cjms, cjme, ckms, ckme, &
3979 cips, cipe, cjps, cjpe, ckps, ckpe )
3981 !KAL this is the original WRF code
3982 !CALL get_ijk_from_grid ( grid , &
3983 ! cids, cide, cjds, cjde, ckds, ckde, &
3984 ! cims, cime, cjms, cjme, ckms, ckme, &
3985 ! cips, cipe, cjps, cjpe, ckps, ckpe )
3986 CALL get_ijk_from_grid ( ngrid , &
3987 nids, nide, njds, njde, nkds, nkde, &
3988 nims, nime, njms, njme, nkms, nkme, &
3989 nips, nipe, njps, njpe, nkps, nkpe )
3991 nlev = ckde - ckds + 1
3993 #include "nest_interpdown_unpack.inc"
3995 if (ngrid%vert_refine_method .NE. 0) then
3997 !KAL calculating the vertical coordinate for parent and nest grid (code from ndown)
3998 ! assume that the parent and nest have the same p_top value (as in ndown)
4000 !KAL ckde is equal to e_vert of the coarse grid. There are e_vert-1 u points. The coarse 1D grid here has e_vert+1 points:
4001 ! the e_vert-1 points from the coarse grid, plus a surface point and a top point. Extrapolation coefficients
4002 ! are used to get the surface and top points that fill out the pro_u_c 1D array of u values from the coarse grid.
4004 hsca_m = 6.7 !KAL scale height of the atmosphere
4005 p_top_m = ngrid%p_top
4007 mu_m = p_surf_m - p_top_m
4010 pre_c = mu_m * pgrid%c3f(k) + p_top_m + pgrid%c4f(k)
4011 alt_w_c(k) = -hsca_m * alog(pre_c/p_surf_m)
4014 pre_c = mu_m * pgrid%c3h(k) + p_top_m + pgrid%c4h(k)
4015 alt_u_c(k+1) = -hsca_m * alog(pre_c/p_surf_m)
4017 alt_u_c(1) = alt_w_c(1)
4018 alt_u_c(ckde+1) = alt_w_c(ckde)
4021 pre_n = mu_m * ngrid%c3f(k) + p_top_m + ngrid%c4f(k)
4022 alt_w_n(k) = -hsca_m * alog(pre_n/p_surf_m)
4025 pre_n = mu_m * ngrid%c3h(k) + p_top_m + ngrid%c4h(k)
4026 alt_u_n(k+1) = -hsca_m * alog(pre_n/p_surf_m)
4028 alt_u_n(1) = alt_w_n(1)
4029 alt_u_n(nkde+1) = alt_w_n(nkde)
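! The alt_* arrays above are log-pressure heights (same units as hsca_m). For a hybrid level
! with column coefficients c3, c4 the dry pressure is
!    p   = mu_m*c3 + p_top_m + c4
! and the height coordinate is
!    alt = -hsca_m * LOG( p / p_surf_m )
! evaluated on full (w) levels via c3f/c4f and on half (u) levels via c3h/c4h, for both the
! parent (pgrid) and the nest (ngrid). The first and last u-level entries are set to the
! bottom and top w-level heights, supplying the surface and top points of the e_vert+1
! coarse 1-D grid described above.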
4033 !KAL added this call for vertical nesting (return coarse grid dimensions to intended values)
4034 CALL get_ijk_from_grid ( grid , &
4035 cids, cide, cjds, cjde, ckds, ckde, &
4036 cims, cime, cjms, cjme, ckms, ckme, &
4037 cips, cipe, cjps, cjpe, ckps, ckpe )
4039 CALL get_ijk_from_grid ( grid , &
4040 ids, ide, jds, jde, kds, kde, &
4041 ims, ime, jms, jme, kms, kme, &
4042 ips, ipe, jps, jpe, kps, kpe )
4044 ! Vertical refinement is turned on.
4046 IF (ngrid%vert_refine_method .NE. 0) THEN
4048 #include "nest_forcedown_interp_vert.inc"
4050 IF ( ngrid%this_is_an_ideal_run ) THEN
4051 IF ( SIZE( grid%t_init, 1 ) * SIZE( grid%t_init, 3 ) .GT. 1 ) THEN
4052 CALL vert_interp_vert_nesting( grid%t_init, & !CD field
4053 ids, ide, kds, kde, jds, jde, & !CD dims
4054 ims, ime, kms, kme, jms, jme, & !CD dims
4055 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & !CD dims
4056 pgrid%s_vert, pgrid%e_vert, & !vertical dimension of the parent grid
4057 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & !coarse grid extrapolation constants
4058 alt_u_c, alt_u_n ) !coordinates for parent and nest
4059 END IF ! Check t_init is a fully allocated 3d array.
4060 END IF ! only for ideal runs
4063 ! Rebalance the grid on the intermediate grid. The intermediate grid has the horizontal
4064 ! resolution of the parent grid, but at this point has been interpolated in the vertical
4065 ! to the resolution of the nest. The base state (phb, pb, etc) from the parent grid is
4066 ! unpacked onto the intermediate grid every time this subroutine is called. We need the
4067 ! base state of the nest, so it is recalculated here.
4069 ! Additionally, we do not need to vertically interpolate the entire intermediate grid
4070 ! above, just the points that contribute to the boundary forcing.
4072 ! Base state potential temperature and inverse density (alpha = 1/rho) from
4073 ! the half eta levels and the base-profile surface pressure. Compute 1/rho
4074 ! from equation of state. The potential temperature is a perturbation from t0.
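! In symbols: combining the equation of state alpha = Rd*T/p with T = theta*(p/p0)**(Rd/cp)
! gives the base-state inverse density computed below,
!    alb = (r_d/p1000mb) * (t_init + t0) * (pb/p1000mb)**cvpm ,   with cvpm = -cv/cp,
! where pb is the dry hydrostatic base pressure on half levels,
!    pb  = c3h(k)*mub + c4h(k) + p_top .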
4076 ! Uncouple the variables moist and t_2 that are used to calculate ph_2
4078 DO j = MAX(jds,jps),MIN(jde-1,jpe)
4079 DO i = MAX(ids,ips),MIN(ide-1,ipe)
4081 grid%t_2(i,k,j) = grid%t_2(i,k,j)/((ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k)) + (ngrid%c1h(k)*grid%mu_2(i,j)))
4082 moist(i,k,j,P_QV) = moist(i,k,j,P_QV)/((ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k)) + (ngrid%c1h(k)*grid%mu_2(i,j)))
4087 DO j = MAX(jds,jps),MIN(jde-1,jpe)
4088 DO i = MAX(ids,ips),MIN(ide-1,ipe)
4091 grid%pb(i,k,j) = ngrid%c3h(k) * grid%mub(i,j) + ngrid%c4h(k) + ngrid%p_top
4093 ! If this is a real run, recalc t_init.
4095 IF ( .NOT. ngrid%this_is_an_ideal_run ) THEN
4096 temp = MAX ( ngrid%tiso, ngrid%t00 + ngrid%tlp*LOG(grid%pb(i,k,j)/ngrid%p00) )
4097 IF ( grid%pb(i,k,j) .LT. ngrid%p_strat ) THEN
4098 temp = ngrid%tiso + ngrid%tlp_strat * LOG ( grid%pb(i,k,j)/ngrid%p_strat )
4100 grid%t_init(i,k,j) = temp*(ngrid%p00/grid%pb(i,k,j))**(r_d/cp) - t0
4102 grid%alb(i,k,j) = (r_d/p1000mb)*(grid%t_init(i,k,j)+t0)*(grid%pb(i,k,j)/p1000mb)**cvpm
4105 ! Integrate base geopotential, starting at terrain elevation. This assures that
4106 ! the base state is in exact hydrostatic balance with respect to the model equations.
4107 ! This field is on full levels.
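! Discretely, hypsometric_opt = 1 steps the base geopotential up one layer at a time,
!    phb(k+1) = phb(k) - dnw(k) * ( c1h(k)*mub + c2h(k) ) * alb(k)
! i.e. alb times the dry hydrostatic pressure thickness of the layer, while
! hypsometric_opt = 2 uses the log-pressure (hypsometric) form
!    phb(k)   = phb(k-1) + alb(k-1) * p_half(k-1) * LOG( p_full(k-1) / p_full(k) )
! with p_full and p_half the dry hydrostatic pressures on full and half levels.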
4109 grid%phb(i,1,j) = grid%ht(i,j) * g
4110 IF (grid%hypsometric_opt == 1) THEN
4113 grid%phb(i,kk,j) = grid%phb(i,k,j) - ngrid%dnw(k)*(ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k))*grid%alb(i,k,j)
4115 ELSE IF (grid%hypsometric_opt == 2) THEN
4117 pfu = ngrid%c3f(k )*grid%MUB(i,j) + ngrid%c4f(k ) + ngrid%p_top
4118 pfd = ngrid%c3f(k-1)*grid%MUB(i,j) + ngrid%c4f(k-1) + ngrid%p_top
4119 phm = ngrid%c3h(k-1)*grid%MUB(i,j) + ngrid%c4h(k-1) + ngrid%p_top
4120 grid%phb(i,k,j) = grid%phb(i,k-1,j) + grid%alb(i,k-1,j)*phm*LOG(pfd/pfu)
4123 CALL wrf_error_fatal( 'module_dm: hypsometric_opt should be 1 or 2' )
4124 END IF ! which hypsometric option
4127 ! Perturbation fields
4128 ALLOCATE( p (ips:ipe, kps:kpe, jps:jpe) )
4129 ALLOCATE( al(ips:ipe, kps:kpe, jps:jpe) )
4130 DO j = MAX(jds,jps),MIN(jde-1,jpe)
4131 DO i = MAX(ids,ips),MIN(ide-1,ipe)
4132 ! Integrate the hydrostatic equation (from the RHS of the bigstep vertical momentum
4133 ! equation) down from the top to get the pressure perturbation. First get the pressure
4134 ! perturbation, moisture, and inverse density (total and perturbation) at the top-most level.
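! Outline of the downward integration below: at the top half level the perturbation dry
! pressure is taken as half of the topmost layer's moist column-mass increment (the two-sided
! average of moist degenerates to the single top value there); lower down each step subtracts
! one layer's moist column mass,
!    p(k) = p(k+1) - ( c1f*Mu_2 + qvf1*(c1f*Mub + c2f) ) / qvf2 / rdn(k+1)
! with qvf1, qvf2 the moisture weighting factors formed from the layer-mean Qv. The
! perturbation inverse density al is then the equation-of-state alpha evaluated with the full
! pressure (p+pb), full theta (t_2+t0) and the virtual-temperature factor qvf, minus alb.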
4139 qvf1 = 0.5*(moist(i,kk,j,P_QV)+moist(i,kk,j,P_QV))
4143 p(i,kk,j) = - 0.5*((ngrid%c1f(k)*grid%Mu_2(i,j))+qvf1*(ngrid%c1f(k)*grid%Mub(i,j)+ngrid%c2f(k)))/ngrid%rdnw(kk)/qvf2
4144 IF ( config_flags%use_theta_m == 0) THEN
4145 qvf = 1. + rvovrd*moist(i,kk,j,P_QV)
4149 al(i,kk,j) = (r_d/p1000mb)*(grid%t_2(i,kk,j)+t0)*qvf* &
4150 (((p(i,kk,j)+grid%pb(i,kk,j))/p1000mb)**cvpm) - grid%alb(i,kk,j)
4152 ! Now, integrate down the column to compute the pressure perturbation, and diagnose the two
4153 ! inverse density fields (total and perturbation).
4157 qvf1 = 0.5*(moist(i,kk,j,P_QV)+moist(i,kk+1,j,P_QV))
4160 p(i,kk,j) = p(i,kk+1,j) - ((ngrid%c1f(k)*grid%Mu_2(i,j)) + qvf1*(ngrid%c1f(k)*grid%Mub(i,j)+ngrid%c2f(k)))/qvf2/ngrid%rdn(kk+1)
4161 IF ( config_flags%use_theta_m == 0) THEN
4162 qvf = 1. + rvovrd*moist(i,kk,j,P_QV)
4166 al(i,kk,j) = (r_d/p1000mb)*(grid%t_2(i,kk,j)+t0)*qvf* &
4167 (((p(i,kk,j)+grid%pb(i,kk,j))/p1000mb)**cvpm) - grid%alb(i,kk,j)
4170 ! This is the hydrostatic equation used in the model after the small timesteps. In
4171 ! the model, grid%al (inverse density) is computed from the geopotential.
4173 IF (grid%hypsometric_opt == 1) THEN
4176 grid%ph_2(i,kk,j) = grid%ph_2(i,kk-1,j) - &
4177 ngrid%dnw(kk-1) * ( ((ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k))+(ngrid%c1h(k)*grid%mu_2(i,j)))*al(i,kk-1,j) &
4178 + (ngrid%c1h(k)*grid%mu_2(i,j))*grid%alb(i,kk-1,j) )
4181 ! Alternative hydrostatic eq.: dZ = -al*p*dLOG(p), where p is dry pressure.
4182 ! Note that al*p approximates Rd*T, so -al*p*dLOG(p) approximates dZ.
4183 ! Because T varies nearly linearly with z here, this first-order integration gives a better result.
4185 ELSE IF (grid%hypsometric_opt == 2) THEN
4187 grid%ph_2(i,1,j) = grid%phb(i,1,j)
4189 pfu = ngrid%c3f(k )*( grid%MUB(i,j)+grid%MU_2(i,j) ) + ngrid%c4f(k ) + ngrid%p_top
4190 pfd = ngrid%c3f(k-1)*( grid%MUB(i,j)+grid%MU_2(i,j) ) + ngrid%c4f(k-1) + ngrid%p_top
4191 phm = ngrid%c3h(k-1)*( grid%MUB(i,j)+grid%MU_2(i,j) ) + ngrid%c4h(k-1) + ngrid%p_top
4192 grid%ph_2(i,k,j) = grid%ph_2(i,k-1,j) + (grid%alb(i,k-1,j)+al(i,k-1,j))*phm*LOG(pfd/pfu)
4196 grid%ph_2(i,k,j) = grid%ph_2(i,k,j) - grid%phb(i,k,j)
4207 ! Couple the variables moist and t_2, and the newly calculated ph_2
4208 DO j = MAX(jds,jps),MIN(jde-1,jpe)
4209 DO i = MAX(ids,ips),MIN(ide-1,ipe)
4211 grid%ph_2(i,k,j) = grid%ph_2(i,k,j)*((ngrid%c1f(k)*grid%Mub(i,j)+ngrid%c2f(k)) + (ngrid%c1f(k)*grid%Mu_2(i,j)))
4215 DO j = MAX(jds,jps),MIN(jde-1,jpe)
4216 DO i = MAX(ids,ips),MIN(ide-1,ipe)
4218 grid%t_2(i,k,j) = grid%t_2(i,k,j)*((ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k)) + (ngrid%c1h(k)*grid%mu_2(i,j)))
4219 moist(i,k,j,P_QV) = moist(i,k,j,P_QV)*((ngrid%c1h(k)*grid%mub(i,j)+ngrid%c2h(k)) + (ngrid%c1h(k)*grid%mu_2(i,j)))
4228 #include "HALO_FORCE_DOWN.inc"
4230 ! code here to interpolate the data into the nested domain
4231 # include "nest_forcedown_interp.inc"
4234 END SUBROUTINE force_domain_em_part2
4236 !------------------------------------------------------------------
4238 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
4240 #include "dummy_new_args.inc"
4243 USE module_state_description
4244 USE module_domain, ONLY : domain, get_ijk_from_grid
4245 USE module_configure, ONLY : grid_config_rec_type
4246 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
4247 nest_task_offsets, nest_pes_x, nest_pes_y, which_kid, &
4248 intercomm_active, mpi_comm_to_kid, mpi_comm_to_mom, &
4249 mytask, get_dm_max_halo_width
4253 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
4254 TYPE(domain), POINTER :: intermediate_grid
4255 TYPE(domain), POINTER :: ngrid
4256 #include "dummy_new_decl.inc"
4258 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
4259 INTEGER iparstrt,jparstrt,sw
4260 TYPE (grid_config_rec_type) :: config_flags
4262 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
4263 cims, cime, cjms, cjme, ckms, ckme, &
4264 cips, cipe, cjps, cjpe, ckps, ckpe
4265 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
4266 iims, iime, ijms, ijme, ikms, ikme, &
4267 iips, iipe, ijps, ijpe, ikps, ikpe
4268 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4269 nims, nime, njms, njme, nkms, nkme, &
4270 nips, nipe, njps, njpe, nkps, nkpe
4272 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4274 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
4275 INTEGER thisdomain_max_halo_width
4276 INTEGER local_comm, myproc, nproc
4277 INTEGER ioffset, ierr
4279 CALL wrf_get_dm_communicator ( local_comm )
4280 CALL wrf_get_myproc( myproc )
4281 CALL wrf_get_nproc( nproc )
4283 CALL get_ijk_from_grid ( grid , &
4284 cids, cide, cjds, cjde, ckds, ckde, &
4285 cims, cime, cjms, cjme, ckms, ckme, &
4286 cips, cipe, cjps, cjpe, ckps, ckpe )
4287 CALL get_ijk_from_grid ( intermediate_grid , &
4288 iids, iide, ijds, ijde, ikds, ikde, &
4289 iims, iime, ijms, ijme, ikms, ikme, &
4290 iips, iipe, ijps, ijpe, ikps, ikpe )
4291 CALL get_ijk_from_grid ( ngrid , &
4292 nids, nide, njds, njde, nkds, nkde, &
4293 nims, nime, njms, njme, nkms, nkme, &
4294 nips, nipe, njps, njpe, nkps, nkpe )
4296 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
4297 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
4298 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
4299 CALL nl_get_shw ( intermediate_grid%id, sw )
4300 icoord = iparstrt - sw
4301 jcoord = jparstrt - sw
4302 idim_cd = iide - iids + 1
4303 jdim_cd = ijde - ijds + 1
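! icoord/jcoord locate the low corner of the intermediate (coarse-data) domain in the
! parent's index space: the nest's i/j_parent_start pulled back by sw, the halo/stencil
! half-width returned by nl_get_shw. idim_cd/jdim_cd are the intermediate domain's
! horizontal dimensions. Together with nlev they describe the block of parent data that
! nest_interpdown_pack sends toward the nest.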
4305 nlev = ckde - ckds + 1
4307 ! get max_halo_width for parent. It may be smaller if the parent is the MOAD (mother of all domains).
4308 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
4310 IF ( grid%active_this_task ) THEN
4311 #include "nest_interpdown_pack.inc"
4314 ! determine which communicator and offset to use
4315 IF ( intercomm_active( grid%id ) ) THEN ! I am parent
4316 local_comm = mpi_comm_to_kid( which_kid(ngrid%id), grid%id )
4317 ioffset = nest_task_offsets(ngrid%id)
4318 ELSE IF ( intercomm_active( ngrid%id ) ) THEN ! I am nest
4319 local_comm = mpi_comm_to_mom( ngrid%id )
4320 ioffset = nest_task_offsets(ngrid%id)
4323 IF ( grid%active_this_task .OR. ngrid%active_this_task ) THEN
4325 CALL mpi_comm_rank(local_comm,myproc,ierr)
4326 CALL mpi_comm_size(local_comm,nproc,ierr)
4328 CALL rsl_lite_bcast_msgs( myproc, nest_pes_x(grid%id)*nest_pes_y(grid%id), &
4329 nest_pes_x(ngrid%id)*nest_pes_y(ngrid%id), &
4330 ioffset, local_comm )
4334 END SUBROUTINE interp_domain_em_part1
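! Pattern shared by the *_part1 / *_part2 pairs in this file: part1 runs where the parent's
! data live, packing the needed parent fields (nest_interpdown_pack above) and broadcasting
! the message streams across the parent and nest task sets with rsl_lite_bcast_msgs, using
! the communicator and task offset selected above; part2 runs on the nest side, unpacking
! onto the intermediate grid, exchanging halos, and interpolating to the nest.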
4336 !------------------------------------------------------------------
4338 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, pgrid, config_flags &
4340 #include "dummy_new_args.inc"
4343 USE module_state_description
4344 USE module_domain, ONLY : domain, get_ijk_from_grid
4345 USE module_configure, ONLY : grid_config_rec_type
4346 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
4347 mytask, get_dm_max_halo_width, which_kid
4348 ! push_communicators_for_domain,pop_communicators_for_domain
4349 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
4352 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
4353 TYPE(domain), POINTER :: ngrid
4354 TYPE(domain), POINTER :: pgrid !KAL added for vertical nesting
4355 #include "dummy_new_decl.inc"
4357 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
4358 TYPE (grid_config_rec_type) :: config_flags
4360 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
4361 cims, cime, cjms, cjme, ckms, ckme, &
4362 cips, cipe, cjps, cjpe, ckps, ckpe
4363 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4364 nims, nime, njms, njme, nkms, nkme, &
4365 nips, nipe, njps, njpe, nkps, nkpe
4366 INTEGER :: ids, ide, jds, jde, kds, kde, &
4367 ims, ime, jms, jme, kms, kme, &
4368 ips, ipe, jps, jpe, kps, kpe
4370 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4374 INTEGER thisdomain_max_halo_width
4376 !KAL variables for vertical nesting
4377 REAL :: p_top_m , p_surf_m , mu_m , hsca_m , pre_c ,pre_n
4378 REAL, DIMENSION(pgrid%s_vert:pgrid%e_vert) :: alt_w_c
4379 REAL, DIMENSION(pgrid%s_vert:pgrid%e_vert+1) :: alt_u_c
4380 REAL, DIMENSION(ngrid%s_vert:ngrid%e_vert) :: alt_w_n
4381 REAL, DIMENSION(ngrid%s_vert:ngrid%e_vert+1) :: alt_u_n
4384 !KAL change this for vertical nesting
4385 ! interp_domain_em_part1 packs up the interpolation onto the coarse (vertical) grid;
4386 ! therefore the message size is based on the coarse grid's number of levels.
4387 ! Here it is unpacked onto the intermediate grid.
4388 CALL get_ijk_from_grid ( pgrid , &
4389 cids, cide, cjds, cjde, ckds, ckde, &
4390 cims, cime, cjms, cjme, ckms, ckme, &
4391 cips, cipe, cjps, cjpe, ckps, ckpe )
4392 !KAL this is the original WRF code
4393 !CALL get_ijk_from_grid ( grid , &
4394 ! cids, cide, cjds, cjde, ckds, ckde, &
4395 ! cims, cime, cjms, cjme, ckms, ckme, &
4396 ! cips, cipe, cjps, cjpe, ckps, ckpe )
4397 CALL get_ijk_from_grid ( ngrid , &
4398 nids, nide, njds, njde, nkds, nkde, &
4399 nims, nime, njms, njme, nkms, nkme, &
4400 nips, nipe, njps, njpe, nkps, nkpe )
4402 nlev = ckde - ckds + 1
4404 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
4406 #include "nest_interpdown_unpack.inc"
4409 if (ngrid%vert_refine_method .NE. 0) then
4411 !KAL calculating the vertical coordinate for parent and nest grid (code from ndown)
4412 ! assume that the parent and nest have the same p_top value (as in ndown)
4414 !KAL ckde is equal to e_vert of the coarse grid. There are e_vert-1 u points. The coarse 1D grid here has e_vert+1 points:
4415 ! the e_vert-1 points from the coarse grid, plus a surface point and a top point. Extrapolation coefficients
4416 ! are used to get the surface and top points that fill out the pro_u_c 1D array of u values from the coarse grid.
4418 hsca_m = 6.7 !KAL scale height of the atmosphere
4419 p_top_m = ngrid%p_top
4421 mu_m = p_surf_m - p_top_m
4424 pre_c = mu_m * pgrid%c3f(k) + p_top_m + pgrid%c4f(k)
4425 alt_w_c(k) = -hsca_m * alog(pre_c/p_surf_m)
4428 pre_c = mu_m * pgrid%c3h(k) + p_top_m + pgrid%c4h(k)
4429 alt_u_c(k+1) = -hsca_m * alog(pre_c/p_surf_m)
4431 alt_u_c(1) = alt_w_c(1)
4432 alt_u_c(ckde+1) = alt_w_c(ckde)
4435 pre_n = mu_m * ngrid%c3f(k) + p_top_m + ngrid%c4f(k)
4436 alt_w_n(k) = -hsca_m * alog(pre_n/p_surf_m)
4439 pre_n = mu_m * ngrid%c3h(k) + p_top_m + ngrid%c4h(k)
4440 alt_u_n(k+1) = -hsca_m * alog(pre_n/p_surf_m)
4442 alt_u_n(1) = alt_w_n(1)
4443 alt_u_n(nkde+1) = alt_w_n(nkde)
4448 !KAL added this call for vertical nesting (return coarse grid dimensions to intended values)
4449 CALL get_ijk_from_grid ( grid , &
4450 cids, cide, cjds, cjde, ckds, ckde, &
4451 cims, cime, cjms, cjme, ckms, ckme, &
4452 cips, cipe, cjps, cjpe, ckps, ckpe )
4454 CALL get_ijk_from_grid ( grid , &
4455 ids, ide, jds, jde, kds, kde, &
4456 ims, ime, jms, jme, kms, kme, &
4457 ips, ipe, jps, jpe, kps, kpe )
4460 if (ngrid%vert_refine_method .NE. 0) then
4462 !KAL added this code (the include file) for the vertical nesting
4463 #include "nest_interpdown_interp_vert.inc"
4466 !KAL finish off the 1-D variables (t_base, u_base, v_base, qv_base, and z_base) (move this out of here if alt_u_c and alt_u_n are calculated elsewhere)
4467 CALL vert_interp_vert_nesting_1d ( &
4468 ngrid%t_base, & ! CD field
4469 ids, ide, kds, kde, jds, jde, & ! CD dims
4470 ims, ime, kms, kme, jms, jme, & ! CD dims
4471 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & ! CD dims
4472 pgrid%s_vert, pgrid%e_vert, & ! vertical dimension of the parent grid
4473 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & ! coarse grid extrapolation constants
4474 alt_u_c, alt_u_n) ! coordinates for parent and nest
4475 CALL vert_interp_vert_nesting_1d ( &
4476 ngrid%u_base, & ! CD field
4477 ids, ide, kds, kde, jds, jde, & ! CD dims
4478 ims, ime, kms, kme, jms, jme, & ! CD dims
4479 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & ! CD dims
4480 pgrid%s_vert, pgrid%e_vert, & ! vertical dimension of the parent grid
4481 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & ! coarse grid extrapolation constants
4482 alt_u_c, alt_u_n) ! coordinates for parent and nest
4483 CALL vert_interp_vert_nesting_1d ( &
4484 ngrid%v_base, & ! CD field
4485 ids, ide, kds, kde, jds, jde, & ! CD dims
4486 ims, ime, kms, kme, jms, jme, & ! CD dims
4487 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & ! CD dims
4488 pgrid%s_vert, pgrid%e_vert, & ! vertical dimension of the parent grid
4489 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & ! coarse grid extrapolation constants
4490 alt_u_c, alt_u_n) ! coordinates for parent and nest
4491 CALL vert_interp_vert_nesting_1d ( &
4492 ngrid%qv_base, & ! CD field
4493 ids, ide, kds, kde, jds, jde, & ! CD dims
4494 ims, ime, kms, kme, jms, jme, & ! CD dims
4495 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & ! CD dims
4496 pgrid%s_vert, pgrid%e_vert, & ! vertical dimension of the parent grid
4497 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & ! coarse grid extrapolation constants
4498 alt_u_c, alt_u_n) ! coordinates for parent and nest
4499 CALL vert_interp_vert_nesting_1d ( &
4500 ngrid%z_base, & ! CD field
4501 ids, ide, kds, kde, jds, jde, & ! CD dims
4502 ims, ime, kms, kme, jms, jme, & ! CD dims
4503 ips, ipe, kps, MIN( (kde-1), kpe ), jps, jpe, & ! CD dims
4504 pgrid%s_vert, pgrid%e_vert, & ! vertical dimension of the parent grid
4505 pgrid%cf1, pgrid%cf2, pgrid%cf3, pgrid%cfn, pgrid%cfn1, & ! coarse grid extrapolation constants
4506 alt_u_c, alt_u_n) ! coordinates for parent and nest
4510 CALL push_communicators_for_domain( grid%id )
4512 #include "HALO_INTERP_DOWN.inc"
4514 CALL pop_communicators_for_domain
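! The push/pop pair above scopes the RSL_LITE communicators to this domain (grid%id) for the
! duration of the HALO_INTERP_DOWN exchange, so the halo traffic runs on the nest domain's
! own task mesh rather than on whatever communicator set was current before.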
4516 # include "nest_interpdown_interp.inc"
4519 END SUBROUTINE interp_domain_em_part2
4521 !------------------------------------------------------------------
4523 SUBROUTINE interp_domain_em_small_part1 ( grid, intermediate_grid, ngrid, config_flags &
4525 #include "dummy_new_args.inc"
4528 USE module_state_description
4529 USE module_domain, ONLY : domain, get_ijk_from_grid
4530 USE module_configure, ONLY : grid_config_rec_type
4531 USE module_comm_dm, ONLY: halo_em_horiz_interp_sub
4532 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
4533 mytask, get_dm_max_halo_width, &
4534 nest_task_offsets, mpi_comm_to_kid, mpi_comm_to_mom, &
4535 which_kid, nest_pes_x, nest_pes_y, intercomm_active
4539 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
4540 TYPE(domain), POINTER :: intermediate_grid
4541 TYPE(domain), POINTER :: ngrid
4542 #include "dummy_new_decl.inc"
4544 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
4545 INTEGER iparstrt,jparstrt,sw
4546 TYPE (grid_config_rec_type) :: config_flags
4548 INTEGER :: ids, ide, jds, jde, kds, kde, &
4549 ims, ime, jms, jme, kms, kme, &
4550 ips, ipe, jps, jpe, kps, kpe
4552 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
4553 cims, cime, cjms, cjme, ckms, ckme, &
4554 cips, cipe, cjps, cjpe, ckps, ckpe
4555 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
4556 iims, iime, ijms, ijme, ikms, ikme, &
4557 iips, iipe, ijps, ijpe, ikps, ikpe
4558 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4559 nims, nime, njms, njme, nkms, nkme, &
4560 nips, nipe, njps, njpe, nkps, nkpe
4562 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4564 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
4565 INTEGER thisdomain_max_halo_width
4566 INTEGER local_comm, myproc, nproc
4569 CALL wrf_get_dm_communicator ( local_comm )
4570 CALL wrf_get_myproc( myproc )
4571 CALL wrf_get_nproc( nproc )
4573 CALL get_ijk_from_grid ( grid , &
4574 ids, ide, jds, jde, kds, kde, &
4575 ims, ime, jms, jme, kms, kme, &
4576 ips, ipe, jps, jpe, kps, kpe )
4578 # include "HALO_EM_HORIZ_INTERP.inc"
4581 CALL get_ijk_from_grid ( grid , &
4582 cids, cide, cjds, cjde, ckds, ckde, &
4583 cims, cime, cjms, cjme, ckms, ckme, &
4584 cips, cipe, cjps, cjpe, ckps, ckpe )
4585 CALL get_ijk_from_grid ( intermediate_grid , &
4586 iids, iide, ijds, ijde, ikds, ikde, &
4587 iims, iime, ijms, ijme, ikms, ikme, &
4588 iips, iipe, ijps, ijpe, ikps, ikpe )
4589 CALL get_ijk_from_grid ( ngrid , &
4590 nids, nide, njds, njde, nkds, nkde, &
4591 nims, nime, njms, njme, nkms, nkme, &
4592 nips, nipe, njps, njpe, nkps, nkpe )
4594 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
4595 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
4596 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
4597 CALL nl_get_shw ( intermediate_grid%id, sw )
4598 icoord = iparstrt - sw
4599 jcoord = jparstrt - sw
4600 idim_cd = iide - iids + 1
4601 jdim_cd = ijde - ijds + 1
4603 nlev = ckde - ckds + 1
4605 ! get max_halo_width for parent. It may be smaller if the parent is the MOAD (mother of all domains).
4606 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
4608 ! How many fields: so far just two 3d arrays (perturbation theta [theta-300] and the geopotential
4609 ! perturbation), plus the 2d topo elevation, three max press/temp/height fields, and three
4610 ! min press/temp/height fields.
4612 msize = ( 2 )* nlev + 7
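! i.e. 2 three-dimensional fields (ph_2 and t_2, sized conservatively at nlev levels each)
! plus 7 two-dimensional fields (ht, t_max_p, ght_max_p, max_p, t_min_p, ght_min_p, min_p),
! matching the rsl_lite_to_child_msg calls below.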
4614 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, calling rsl_lite_to_child')
4615 CALL rsl_lite_to_child_info( local_communicator, msize*RWORDSIZE &
4616 ,cips,cipe,cjps,cjpe &
4617 ,iids,iide,ijds,ijde &
4618 ,nids,nide,njds,njde &
4620 ,ntasks_x,ntasks_y &
4621 ,thisdomain_max_halo_width &
4625 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, back from rsl_lite_to_child')
4626 DO while ( retval .eq. 1 )
4627 IF ( SIZE(grid%ph_2) .GT. 1 ) THEN
4628 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, ph_2')
4630 xv(k)= grid%ph_2(pig,k,pjg)
4632 CALL rsl_lite_to_child_msg(((ckde)-(ckds)+1)*RWORDSIZE,xv)
4635 IF ( SIZE(grid%t_2) .GT. 1 ) THEN
4636 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, t_2')
4637 DO k = ckds,(ckde-1)
4638 xv(k)= grid%t_2(pig,k,pjg)
4640 CALL rsl_lite_to_child_msg((((ckde-1))-(ckds)+1)*RWORDSIZE,xv)
4643 IF ( SIZE(grid%ht) .GT. 1 ) THEN
4644 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, ht')
4645 xv(1)= grid%ht(pig,pjg)
4646 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4649 IF ( SIZE(grid%t_max_p) .GT. 1 ) THEN
4650 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, t_max_p')
4651 xv(1)= grid%t_max_p(pig,pjg)
4652 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4655 IF ( SIZE(grid%ght_max_p) .GT. 1 ) THEN
4656 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, ght_max_p')
4657 xv(1)= grid%ght_max_p(pig,pjg)
4658 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4661 IF ( SIZE(grid%max_p) .GT. 1 ) THEN
4662 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, max_p')
4663 xv(1)= grid%max_p(pig,pjg)
4664 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4667 IF ( SIZE(grid%t_min_p) .GT. 1 ) THEN
4668 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, t_min_p')
4669 xv(1)= grid%t_min_p(pig,pjg)
4670 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4673 IF ( SIZE(grid%ght_min_p) .GT. 1 ) THEN
4674 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, ght_min_p')
4675 xv(1)= grid%ght_min_p(pig,pjg)
4676 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4679 IF ( SIZE(grid%min_p) .GT. 1 ) THEN
4680 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, min_p')
4681 xv(1)= grid%min_p(pig,pjg)
4682 CALL rsl_lite_to_child_msg(RWORDSIZE,xv)
4685 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, calling rsl_lite_to_child_info')
4686 CALL rsl_lite_to_child_info( local_communicator, msize*RWORDSIZE &
4687 ,cips,cipe,cjps,cjpe &
4688 ,iids,iide,ijds,ijde &
4689 ,nids,nide,njds,njde &
4691 ,ntasks_x,ntasks_y &
4692 ,thisdomain_max_halo_width &
4696 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, back from rsl_lite_to_child_info')
4699 ! determine which communicator and offset to use
4700 IF ( intercomm_active( grid%id ) ) THEN ! I am parent
4701 local_comm = mpi_comm_to_kid( which_kid(ngrid%id), grid%id )
4702 ioffset = nest_task_offsets(ngrid%id)
4703 ELSE IF ( intercomm_active( ngrid%id ) ) THEN ! I am nest
4704 local_comm = mpi_comm_to_mom( ngrid%id )
4705 ioffset = nest_task_offsets(ngrid%id)
4708 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, calling rsl_lite_bcast')
4709 CALL rsl_lite_bcast_msgs( myproc, nest_pes_x(grid%id)*nest_pes_y(grid%id), &
4710 nest_pes_x(ngrid%id)*nest_pes_y(ngrid%id), &
4711 ioffset, local_comm )
4712 !call wrf_debug(0,'/external/RSL_LITE/module_dm.F, back from rsl_lite_bcast')
4715 END SUBROUTINE interp_domain_em_small_part1
4717 !------------------------------------------------------------------
4719 SUBROUTINE interp_domain_em_small_part2 ( grid, ngrid, config_flags &
4721 #include "dummy_new_args.inc"
4724 USE module_state_description
4725 USE module_domain, ONLY : domain, get_ijk_from_grid
4726 USE module_configure, ONLY : grid_config_rec_type
4727 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
4728 mytask, get_dm_max_halo_width
4729 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
4732 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
4733 TYPE(domain), POINTER :: ngrid
4734 #include "dummy_new_decl.inc"
4736 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
4737 TYPE (grid_config_rec_type) :: config_flags
4739 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
4740 cims, cime, cjms, cjme, ckms, ckme, &
4741 cips, cipe, cjps, cjpe, ckps, ckpe
4742 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4743 nims, nime, njms, njme, nkms, nkme, &
4744 nips, nipe, njps, njpe, nkps, nkpe
4745 INTEGER :: ids, ide, jds, jde, kds, kde, &
4746 ims, ime, jms, jme, kms, kme, &
4747 ips, ipe, jps, jpe, kps, kpe
4749 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4753 INTEGER thisdomain_max_halo_width
4755 CALL get_ijk_from_grid ( grid , &
4756 cids, cide, cjds, cjde, ckds, ckde, &
4757 cims, cime, cjms, cjme, ckms, ckme, &
4758 cips, cipe, cjps, cjpe, ckps, ckpe )
4759 CALL get_ijk_from_grid ( ngrid , &
4760 nids, nide, njds, njde, nkds, nkde, &
4761 nims, nime, njms, njme, nkms, nkme, &
4762 nips, nipe, njps, njpe, nkps, nkpe )
4764 nlev = ckde - ckds + 1
4766 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
4768 CALL rsl_lite_from_parent_info(pig,pjg,retval)
4770 DO while ( retval .eq. 1 )
4772 IF ( SIZE(grid%ph_2) .GT. 1 ) THEN
4773 CALL rsl_lite_from_parent_msg(((ckde)-(ckds)+1)*RWORDSIZE,xv)
4775 grid%ph_2(pig,k,pjg) = xv(k)
4779 IF ( SIZE(grid%t_2) .GT. 1 ) THEN
4780 CALL rsl_lite_from_parent_msg((((ckde-1))-(ckds)+1)*RWORDSIZE,xv)
4781 DO k = ckds,(ckde-1)
4782 grid%t_2(pig,k,pjg) = xv(k)
4786 IF ( SIZE(grid%ht) .GT. 1 ) THEN
4787 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4788 grid%ht(pig,pjg) = xv(1)
4791 IF ( SIZE(grid%t_max_p) .GT. 1 ) THEN
4792 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4793 grid%t_max_p(pig,pjg) = xv(1)
4796 IF ( SIZE(grid%ght_max_p) .GT. 1 ) THEN
4797 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4798 grid%ght_max_p(pig,pjg) = xv(1)
4801 IF ( SIZE(grid%max_p) .GT. 1 ) THEN
4802 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4803 grid%max_p(pig,pjg) = xv(1)
4806 IF ( SIZE(grid%t_min_p) .GT. 1 ) THEN
4807 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4808 grid%t_min_p(pig,pjg) = xv(1)
4811 IF ( SIZE(grid%ght_min_p) .GT. 1 ) THEN
4812 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4813 grid%ght_min_p(pig,pjg) = xv(1)
4816 IF ( SIZE(grid%min_p) .GT. 1 ) THEN
4817 CALL rsl_lite_from_parent_msg(RWORDSIZE,xv)
4818 grid%min_p(pig,pjg) = xv(1)
4821 CALL rsl_lite_from_parent_info(pig,pjg,retval)
4825 CALL get_ijk_from_grid ( grid , &
4826 ids, ide, jds, jde, kds, kde, &
4827 ims, ime, jms, jme, kms, kme, &
4828 ips, ipe, jps, jpe, kps, kpe )
4830 #include "HALO_INTERP_DOWN.inc"
4832 CALL interp_fcn_bl ( grid%ph_2, &
4833 cids, cide, ckds, ckde, cjds, cjde, &
4834 cims, cime, ckms, ckme, cjms, cjme, &
4835 cips, cipe, ckps, MIN( ckde, ckpe ), cjps, cjpe, &
4837 nids, nide, nkds, nkde, njds, njde, &
4838 nims, nime, nkms, nkme, njms, njme, &
4839 nips, nipe, nkps, MIN( nkde, nkpe ), njps, njpe, &
4840 config_flags%shw, ngrid%imask_nostag, &
4842 ngrid%i_parent_start, ngrid%j_parent_start, &
4843 ngrid%parent_grid_ratio, ngrid%parent_grid_ratio, &
4844 grid%ht, ngrid%ht, &
4845 grid%t_max_p, ngrid%t_max_p, &
4846 grid%ght_max_p, ngrid%ght_max_p, &
4847 grid%max_p, ngrid%max_p, &
4848 grid%t_min_p, ngrid%t_min_p, &
4849 grid%ght_min_p, ngrid%ght_min_p, &
4850 grid%min_p, ngrid%min_p, &
4851 ngrid%znw, ngrid%p_top )
4853 CALL interp_fcn_bl ( grid%t_2, &
4854 cids, cide, ckds, ckde, cjds, cjde, &
4855 cims, cime, ckms, ckme, cjms, cjme, &
4856 cips, cipe, ckps, MIN( (ckde-1), ckpe ), cjps, cjpe, &
4858 nids, nide, nkds, nkde, njds, njde, &
4859 nims, nime, nkms, nkme, njms, njme, &
4860 nips, nipe, nkps, MIN( (nkde-1), nkpe ), njps, njpe, &
4861 config_flags%shw, ngrid%imask_nostag, &
4863 ngrid%i_parent_start, ngrid%j_parent_start, &
4864 ngrid%parent_grid_ratio, ngrid%parent_grid_ratio, &
4865 grid%ht, ngrid%ht, &
4866 grid%t_max_p, ngrid%t_max_p, &
4867 grid%ght_max_p, ngrid%ght_max_p, &
4868 grid%max_p, ngrid%max_p, &
4869 grid%t_min_p, ngrid%t_min_p, &
4870 grid%ght_min_p, ngrid%ght_min_p, &
4871 grid%min_p, ngrid%min_p, &
4872 ngrid%znu, ngrid%p_top )
4875 END SUBROUTINE interp_domain_em_small_part2
4877 !------------------------------------------------------------------
4879 SUBROUTINE feedback_nest_prep ( grid, config_flags &
4881 #include "dummy_new_args.inc"
4884 USE module_state_description
4885 USE module_domain, ONLY : domain, get_ijk_from_grid
4886 USE module_configure, ONLY : grid_config_rec_type
4887 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask !, &
4888 !push_communicators_for_domain, pop_communicators_for_domain
4889 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
4892 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
4893 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; has vertical dim of
4894 ! soil temp, moisture, etc., and vertical dim
4895 ! of soil categories
4896 #include "dummy_new_decl.inc"
4898 INTEGER :: ids, ide, jds, jde, kds, kde, &
4899 ims, ime, jms, jme, kms, kme, &
4900 ips, ipe, jps, jpe, kps, kpe
4902 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4904 INTEGER :: idum1, idum2
4907 CALL get_ijk_from_grid ( grid , &
4908 ids, ide, jds, jde, kds, kde, &
4909 ims, ime, jms, jme, kms, kme, &
4910 ips, ipe, jps, jpe, kps, kpe )
4912 IF ( grid%active_this_task ) THEN
4913 CALL push_communicators_for_domain( grid%id )
4916 #include "HALO_INTERP_UP.inc"
4919 CALL pop_communicators_for_domain
4922 END SUBROUTINE feedback_nest_prep
4924 !------------------------------------------------------------------
4926 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
4928 #include "dummy_new_args.inc"
4931 USE module_state_description
4932 USE module_domain, ONLY : domain, get_ijk_from_grid
4933 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
4934 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
4935 ipe_save, jpe_save, ips_save, jps_save, &
4936 nest_pes_x, nest_pes_y
4940 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
4941 TYPE(domain), POINTER :: ngrid
4942 #include "dummy_new_decl.inc"
4944 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
4945 TYPE(domain), POINTER :: xgrid
4946 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
4948 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
4949 cims, cime, cjms, cjme, ckms, ckme, &
4950 cips, cipe, cjps, cjpe, ckps, ckpe
4951 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4952 nims, nime, njms, njme, nkms, nkme, &
4953 nips, nipe, njps, njpe, nkps, nkpe
4955 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4957 INTEGER local_comm, myproc, nproc, idum1, idum2
4958 INTEGER thisdomain_max_halo_width
4960 !cyl: add variables for trajectory
4964 SUBROUTINE feedback_nest_prep ( grid, config_flags &
4966 #include "dummy_new_args.inc"
4969 USE module_state_description
4970 USE module_domain, ONLY : domain
4971 USE module_configure, ONLY : grid_config_rec_type
4973 TYPE (grid_config_rec_type) :: config_flags
4974 TYPE(domain), TARGET :: grid
4975 #include "dummy_new_decl.inc"
4976 END SUBROUTINE feedback_nest_prep
4980 CALL wrf_get_dm_communicator ( local_comm )
4981 CALL wrf_get_myproc( myproc )
4982 CALL wrf_get_nproc( nproc )
4986 CALL get_ijk_from_grid ( grid , &
4987 cids, cide, cjds, cjde, ckds, ckde, &
4988 cims, cime, cjms, cjme, ckms, ckme, &
4989 cips, cipe, cjps, cjpe, ckps, ckpe )
4991 CALL get_ijk_from_grid ( ngrid , &
4992 nids, nide, njds, njde, nkds, nkde, &
4993 nims, nime, njms, njme, nkms, nkme, &
4994 nips, nipe, njps, njpe, nkps, nkpe )
4996 nlev = ckde - ckds + 1
4998 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
4999 jps_save = ngrid%j_parent_start
5000 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
5001 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
5003 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
5004 ! in a separate routine because the HALOs need the data to be dereferenced from the
5005 ! grid data structure and, in this routine, the dereferenced fields are those of
5006 ! the intermediate domain, not the nest itself. Save the current grid pointer to the intermediate
5007 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
5008 ! to point to the intermediate domain.
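! In outline, using the xgrid pointer declared above (a sketch of the swap just described,
! not the literal statements):
!    xgrid => grid          ! remember the intermediate grid
!    grid  => ngrid         ! so feedback_nest_prep dereferences the nest's fields
!    CALL feedback_nest_prep ( grid, nconfig_flags, ... )
!    grid  => xgrid         ! restore grid to the intermediate grid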
5010 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
5011 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
5015 CALL feedback_nest_prep ( grid, nconfig_flags &
5017 #include "actual_new_args.inc"
5021 ! put things back so grid is intermediate grid
5024 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
5026 ! "interp" (basically copy) ngrid onto intermediate grid
5028 #include "nest_feedbackup_interp.inc"
5031 END SUBROUTINE feedback_domain_em_part1
5033 !------------------------------------------------------------------
5035 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
5037 #include "dummy_new_args.inc"
5040 USE module_state_description
5041 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
5042 USE module_configure, ONLY : grid_config_rec_type, model_config_rec
5043 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
5044 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width, &
5045 nest_pes_x, nest_pes_y, &
5046 intercomm_active, nest_task_offsets, &
5047 mpi_comm_to_mom, mpi_comm_to_kid, which_kid !, &
5048 !push_communicators_for_domain, pop_communicators_for_domain
5050 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
5055 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
5056 TYPE(domain), POINTER :: intermediate_grid
5057 TYPE(domain), POINTER :: ngrid
5058 TYPE(domain), POINTER :: parent_grid
5060 #include "dummy_new_decl.inc"
5062 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
5063 TYPE (grid_config_rec_type) :: config_flags
5065 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
5066 cims, cime, cjms, cjme, ckms, ckme, &
5067 cips, cipe, cjps, cjpe, ckps, ckpe
5068 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
5069 nims, nime, njms, njme, nkms, nkme, &
5070 nips, nipe, njps, njpe, nkps, nkpe
5071 INTEGER :: xids, xide, xjds, xjde, xkds, xkde, &
5072 xims, xime, xjms, xjme, xkms, xkme, &
5073 xips, xipe, xjps, xjpe, xkps, xkpe
5074 INTEGER :: ids, ide, jds, jde, kds, kde, &
5075 ims, ime, jms, jme, kms, kme, &
5076 ips, ipe, jps, jpe, kps, kpe
5078 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
5080 INTEGER icoord, jcoord, idim_cd, jdim_cd
5081 INTEGER local_comm, myproc, nproc, ioffset
5082 INTEGER iparstrt, jparstrt, sw, thisdomain_max_halo_width
5085 character*256 :: timestr
5088 LOGICAL, EXTERNAL :: cd_feedback_mask
5090 !cyl: add variables for trajectory
5093 ! On entry to this routine,
5094 ! "grid" refers to the parent domain
5095 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
5096 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
5097 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
5099 ! The way the c and n dimensions are set below looks backwards, but from the point
5100 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
5101 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
5102 ! represents the intermediate domain. This backwards appearance should be fixed in the gen_comms.c
5103 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
5104 ! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
5108 CALL domain_clock_get( grid, current_timestr=timestr )
5110 CALL get_ijk_from_grid ( intermediate_grid , &
5111 cids, cide, cjds, cjde, ckds, ckde, &
5112 cims, cime, cjms, cjme, ckms, ckme, &
5113 cips, cipe, cjps, cjpe, ckps, ckpe )
5114 CALL get_ijk_from_grid ( grid , &
5115 nids, nide, njds, njde, nkds, nkde, &
5116 nims, nime, njms, njme, nkms, nkme, &
5117 nips, nipe, njps, njpe, nkps, nkpe )
5118 CALL get_ijk_from_grid ( ngrid , &
5119 xids, xide, xjds, xjde, xkds, xkde, &
5120 xims, xime, xjms, xjme, xkms, xkme, &
5121 xips, xipe, xjps, xjpe, xkps, xkpe )
5123 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
5124 jps_save = ngrid%j_parent_start
5125 ipe_save = ngrid%i_parent_start + (xide-xids+1) / ngrid%parent_grid_ratio - 1
5126 jpe_save = ngrid%j_parent_start + (xjde-xjds+1) / ngrid%parent_grid_ratio - 1
5131 IF ( ngrid%active_this_task ) THEN
5132 !cyl add this for trajectory
5133 CALL push_communicators_for_domain( ngrid%id )
5135 do tjk = 1,config_flags%num_traj
5136 if (ngrid%traj_long(tjk) .eq. -9999.0) then
5137 ! print*,'n=-9999',tjk
5138 ngrid%traj_long(tjk)=grid%traj_long(tjk)
5139 ngrid%traj_k(tjk)=grid%traj_k(tjk)
5141 ! print*,'p!=-9999',tjk
5142 grid%traj_long(tjk)=ngrid%traj_long(tjk)
5143 grid%traj_k(tjk)=ngrid%traj_k(tjk)
5145 if (ngrid%traj_lat(tjk) .eq. -9999.0) then
5146 ngrid%traj_lat(tjk)=grid%traj_lat(tjk)
5147 ngrid%traj_k(tjk)=grid%traj_k(tjk)
5149 grid%traj_lat(tjk)=ngrid%traj_lat(tjk)
5150 grid%traj_k(tjk)=ngrid%traj_k(tjk)
5155 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
5156 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
5157 CALL nl_get_shw ( intermediate_grid%id, sw )
5158 icoord = iparstrt - sw
5159 jcoord = jparstrt - sw
5160 idim_cd = cide - cids + 1
5161 jdim_cd = cjde - cjds + 1
5163 nlev = ckde - ckds + 1
5165 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
5169 #include "nest_feedbackup_pack.inc"
5171 CALL pop_communicators_for_domain
5175 ! CALL wrf_get_dm_communicator ( local_comm )
5176 ! CALL wrf_get_myproc( myproc )
5177 ! CALL wrf_get_nproc( nproc )
5179 ! determine which communicator and offset to use
5180 IF ( intercomm_active( grid%id ) ) THEN ! I am parent
5181 local_comm = mpi_comm_to_kid( which_kid(ngrid%id), grid%id )
5182 ioffset = nest_task_offsets(ngrid%id)
5183 ELSE IF ( intercomm_active( ngrid%id ) ) THEN ! I am nest
5184 local_comm = mpi_comm_to_mom( ngrid%id )
5185 ioffset = nest_task_offsets(ngrid%id)
5188 IF ( grid%active_this_task .OR. ngrid%active_this_task ) THEN
5190 CALL mpi_comm_rank(local_comm,myproc,ierr)
5191 CALL mpi_comm_size(local_comm,nproc,ierr)
5194 CALL rsl_lite_merge_msgs( myproc, nest_pes_x(grid%id)*nest_pes_y(grid%id), &
5195 nest_pes_x(ngrid%id)*nest_pes_y(ngrid%id), &
5196 ioffset, local_comm )
5199 IF ( grid%active_this_task ) THEN
5200 CALL push_communicators_for_domain( grid%id )
5203 #define NEST_INFLUENCE(A,B) A = B
5204 #include "nest_feedbackup_unpack.inc"
5206 ! smooth coarse grid
5207 CALL get_ijk_from_grid ( ngrid, &
5208 nids, nide, njds, njde, nkds, nkde, &
5209 nims, nime, njms, njme, nkms, nkme, &
5210 nips, nipe, njps, njpe, nkps, nkpe )
5211 CALL get_ijk_from_grid ( grid , &
5212 ids, ide, jds, jde, kds, kde, &
5213 ims, ime, jms, jme, kms, kme, &
5214 ips, ipe, jps, jpe, kps, kpe )
5216 #include "HALO_INTERP_UP.inc"
5218 CALL get_ijk_from_grid ( grid , &
5219 cids, cide, cjds, cjde, ckds, ckde, &
5220 cims, cime, cjms, cjme, ckms, ckme, &
5221 cips, cipe, cjps, cjpe, ckps, ckpe )
5223 #include "nest_feedbackup_smooth.inc"
5225 CALL pop_communicators_for_domain
5229 END SUBROUTINE feedback_domain_em_part2
5233 !------------------------------------------------------------------
5235 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
5236 my_count , & ! sendcount
5237 globbuf, glob_ofst , & ! recvbuf
5238 counts , & ! recvcounts
5241 communicator , & ! communicator
5243 USE module_dm, ONLY : getrealmpitype
5245 INTEGER field_ofst, glob_ofst
5246 INTEGER my_count, communicator, root, ierr
5247 INTEGER , DIMENSION(*) :: counts, displs
5248 REAL, DIMENSION(*) :: Field, globbuf
5252 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
5253 my_count , & ! sendcount
5254 getrealmpitype() , & ! sendtype
5255 globbuf( glob_ofst ) , & ! recvbuf
5256 counts , & ! recvcounts
5258 getrealmpitype() , & ! recvtype
5260 communicator , & ! communicator
5264 END SUBROUTINE wrf_gatherv_real
5266 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
5267 my_count , & ! sendcount
5268 globbuf, glob_ofst , & ! recvbuf
5269 counts , & ! recvcounts
5272 communicator , & ! communicator
5276 INTEGER field_ofst, glob_ofst
5277 INTEGER my_count, communicator, root, ierr
5278 INTEGER , DIMENSION(*) :: counts, displs
5279 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
5280 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
5281 ! for having this separate routine is so we pass the correct MPI type to mpi_gatherv;
5282 ! if we were not indexing the globbuf and Field arrays it would not even matter.
5283 REAL, DIMENSION(*) :: Field, globbuf
5287 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
5288 my_count , & ! sendcount
5289 MPI_DOUBLE_PRECISION , & ! sendtype
5290 globbuf( glob_ofst ) , & ! recvbuf
5291 counts , & ! recvcounts
5293 MPI_DOUBLE_PRECISION , & ! recvtype
5295 communicator , & ! communicator
5299 END SUBROUTINE wrf_gatherv_double
5301 SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
5302 my_count , & ! sendcount
5303 globbuf, glob_ofst , & ! recvbuf
5304 counts , & ! recvcounts
5307 communicator , & ! communicator
5310 INTEGER field_ofst, glob_ofst
5311 INTEGER my_count, communicator, root, ierr
5312 INTEGER , DIMENSION(*) :: counts, displs
5313 INTEGER, DIMENSION(*) :: Field, globbuf
5317 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
5318 my_count , & ! sendcount
5319 MPI_INTEGER , & ! sendtype
5320 globbuf( glob_ofst ) , & ! recvbuf
5321 counts , & ! recvcounts
5323 MPI_INTEGER , & ! recvtype
5325 communicator , & ! communicator
5329 END SUBROUTINE wrf_gatherv_integer
5332 SUBROUTINE wrf_scatterv_real ( &
5333 globbuf, glob_ofst , & ! recvbuf
5334 counts , & ! recvcounts
5335 Field, field_ofst, &
5336 my_count , & ! sendcount
5339 communicator , & ! communicator
5341 USE module_dm, ONLY : getrealmpitype
5343 INTEGER field_ofst, glob_ofst
5344 INTEGER my_count, communicator, root, ierr
5345 INTEGER , DIMENSION(*) :: counts, displs
5346 REAL, DIMENSION(*) :: Field, globbuf
5350 CALL mpi_scatterv( &
5351 globbuf( glob_ofst ) , & ! recvbuf
5352 counts , & ! recvcounts
5354 getrealmpitype() , & ! recvtype
5355 Field( field_ofst ), & ! sendbuf
5356 my_count , & ! sendcount
5357 getrealmpitype() , & ! sendtype
5359 communicator , & ! communicator
5363 END SUBROUTINE wrf_scatterv_real
5365 SUBROUTINE wrf_scatterv_double ( &
5366 globbuf, glob_ofst , & ! recvbuf
5367 counts , & ! recvcounts
5368 Field, field_ofst, &
5369 my_count , & ! sendcount
5372 communicator , & ! communicator
5375 INTEGER field_ofst, glob_ofst
5376 INTEGER my_count, communicator, root, ierr
5377 INTEGER , DIMENSION(*) :: counts, displs
5378 REAL, DIMENSION(*) :: Field, globbuf
5381 ! The declaration of Field and globbuf above is REAL, not DOUBLE PRECISION, because it will be
5382 ! autopromoted to double precision by the compiler when WRF is compiled for 8-byte reals. The only
5383 ! reason for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
5384 ! if we were not indexing the globbuf and Field arrays it would not even matter.
5386 CALL mpi_scatterv( &
5387 globbuf( glob_ofst ) , & ! recvbuf
5388 counts , & ! recvcounts
5390 MPI_DOUBLE_PRECISION , & ! recvtype
5391 Field( field_ofst ), & ! sendbuf
5392 my_count , & ! sendcount
5393 MPI_DOUBLE_PRECISION , & ! sendtype
5395 communicator , & ! communicator
5399 END SUBROUTINE wrf_scatterv_double
5401 SUBROUTINE wrf_scatterv_integer ( &
5402 globbuf, glob_ofst , & ! recvbuf
5403 counts , & ! recvcounts
5404 Field, field_ofst, &
5405 my_count , & ! sendcount
5408 communicator , & ! communicator
5411 INTEGER field_ofst, glob_ofst
5412 INTEGER my_count, communicator, root, ierr
5413 INTEGER , DIMENSION(*) :: counts, displs
5414 INTEGER, DIMENSION(*) :: Field, globbuf
5418 CALL mpi_scatterv( &
5419 globbuf( glob_ofst ) , & ! recvbuf
5420 counts , & ! recvcounts
5422 MPI_INTEGER , & ! recvtype
5423 Field( field_ofst ), & ! sendbuf
5424 my_count , & ! sendcount
5425 MPI_INTEGER , & ! sendtype
5427 communicator , & ! communicator
5431 END SUBROUTINE wrf_scatterv_integer
5432 ! end new stuff 20070124
5434 SUBROUTINE wrf_dm_gatherv ( v, elemsize , km_s, km_e, wordsz )
5436 INTEGER elemsize, km_s, km_e, wordsz
5438 IF ( wordsz .EQ. DWORDSIZE ) THEN
5439 CALL wrf_dm_gatherv_double(v, elemsize , km_s, km_e)
5441 CALL wrf_dm_gatherv_single(v, elemsize , km_s, km_e)
5443 END SUBROUTINE wrf_dm_gatherv
5445 SUBROUTINE wrf_dm_gatherv_double ( v, elemsize , km_s, km_e )
5447 INTEGER elemsize, km_s, km_e
5450 # ifndef USE_MPI_IN_PLACE
5451 REAL*8 v_local((km_e-km_s+1)*elemsize)
5453 INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
5454 INTEGER send_type, myproc, nproc, local_comm, ierr, i
5456 send_type = MPI_DOUBLE_PRECISION
5457 CALL wrf_get_dm_communicator ( local_comm )
5458 CALL wrf_get_nproc( nproc )
5459 CALL wrf_get_myproc( myproc )
5460 ALLOCATE( recvcounts(nproc), displs(nproc) )
5461 i = (km_e-km_s+1)*elemsize
5462 CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr) ;
5464 CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr) ;
5465 # ifdef USE_MPI_IN_PLACE
5466 CALL mpi_allgatherv( MPI_IN_PLACE, &
5468 DO i = 1,elemsize*(km_e-km_s+1)
5469 v_local(i) = v(i+elemsize*km_s-1)
5471 CALL mpi_allgatherv( v_local, &
5473 (km_e-km_s+1)*elemsize, &
5481 DEALLOCATE(recvcounts)
5485 END SUBROUTINE wrf_dm_gatherv_double
5487 SUBROUTINE wrf_dm_gatherv_single ( v, elemsize , km_s, km_e )
5489 INTEGER elemsize, km_s, km_e
5492 # ifndef USE_MPI_IN_PLACE
5493 REAL*4 v_local((km_e-km_s+1)*elemsize)
5495 INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
5496 INTEGER send_type, myproc, nproc, local_comm, ierr, i
5498 send_type = MPI_REAL
5499 CALL wrf_get_dm_communicator ( local_comm )
5500 CALL wrf_get_nproc( nproc )
5501 CALL wrf_get_myproc( myproc )
5502 ALLOCATE( recvcounts(nproc), displs(nproc) )
5503 i = (km_e-km_s+1)*elemsize
5504 CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr) ;
5506 CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr) ;
5507 # ifdef USE_MPI_IN_PLACE
5508 CALL mpi_allgatherv( MPI_IN_PLACE, &
5510 DO i = 1,elemsize*(km_e-km_s+1)
5511 v_local(i) = v(i+elemsize*km_s-1)
5513 CALL mpi_allgatherv( v_local, &
5515 (km_e-km_s+1)*elemsize, &
5523 DEALLOCATE(recvcounts)
5527 END SUBROUTINE wrf_dm_gatherv_single
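! Both wrf_dm_gatherv_double and wrf_dm_gatherv_single above gather every task's km_s:km_e
! slab of v onto all tasks: recvcounts and displs are built with mpi_allgather, then
! mpi_allgatherv fills v. When USE_MPI_IN_PLACE is defined the task's own slab is sent from
! where it already sits (MPI_IN_PLACE); otherwise it is first copied into v_local and sent
! from there.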
5529 SUBROUTINE wrf_dm_decomp1d( nt, km_s, km_e )
5531 INTEGER, INTENT(IN) :: nt
5532 INTEGER, INTENT(OUT) :: km_s, km_e
5534 INTEGER nn, nnp, na, nb
5535 INTEGER myproc, nproc
5537 CALL wrf_get_myproc(myproc)
5538 CALL wrf_get_nproc(nproc)
5539 nn = nt / nproc ! min number done by this task
5541 if ( myproc .lt. mod( nt, nproc ) ) nnp = nnp + 1 ! distribute remainder
5543 na = min( myproc, mod(nt,nproc) ) ! Number of blocks with remainder that precede this one
5544 nb = max( 0, myproc - na ) ! number of blocks without a remainder that precede this one
5545 km_s = na * ( nn+1) + nb * nn ! starting iteration for this task
5546 km_e = km_s + nnp - 1 ! ending iteration for this task
5547 END SUBROUTINE wrf_dm_decomp1d
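! Worked example for wrf_dm_decomp1d (assuming nnp is initialized to nn before the remainder
! test): nt = 10 iterations over nproc = 4 tasks gives nn = 2 with a remainder of 2, so
! tasks 0 and 1 take 3 iterations each and tasks 2 and 3 take 2 each. For myproc = 2:
! na = min(2,2) = 2, nb = 0, km_s = 2*3 + 0*2 = 6, km_e = 6 + 2 - 1 = 7, i.e. task 2 owns
! iterations 6..7; altogether the tasks cover 0..2, 3..5, 6..7, 8..9.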
5550 SUBROUTINE wrf_dm_define_comms ( grid )
5551 USE module_domain, ONLY : domain
5553 TYPE(domain) , INTENT (INOUT) :: grid
5555 END SUBROUTINE wrf_dm_define_comms
5557 SUBROUTINE tfp_message( fname, lno )
5562 WRITE(mess,*)'tfp_message: ',trim(fname),lno
5563 CALL wrf_message(mess)
5564 # ifdef ALLOW_OVERDECOMP
5565 CALL task_for_point_message ! defined in RSL_LITE/task_for_point.c
5567 CALL wrf_error_fatal(mess)
5570 END SUBROUTINE tfp_message
5572 SUBROUTINE set_dm_debug
5573 USE module_dm, ONLY : dm_debug_flag
5575 dm_debug_flag = .TRUE.
5576 END SUBROUTINE set_dm_debug
5577 SUBROUTINE reset_dm_debug
5578 USE module_dm, ONLY : dm_debug_flag
5580 dm_debug_flag = .FALSE.
5581 END SUBROUTINE reset_dm_debug
5582 SUBROUTINE get_dm_debug ( arg )
5583 USE module_dm, ONLY : dm_debug_flag
5587 END SUBROUTINE get_dm_debug