1 !***********************************************************************
2 !* GNU Lesser General Public License
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
6 !* FMS is free software: you can redistribute it and/or modify it under
7 !* the terms of the GNU Lesser General Public License as published by
8 !* the Free Software Foundation, either version 3 of the License, or (at
9 !* your option) any later version.
11 !* FMS is distributed in the hope that it will be useful, but WITHOUT
12 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18 !***********************************************************************
19 !-----------------------------------------------------------------------
20 !> @defgroup mpp_domains_mod mpp_domains_mod
22 !> @brief Domain decomposition and domain update for message-passing codes
23 !> @author V. Balaji SGI/GFDL Princeton University
25 !> A set of simple calls for domain
26 !! decomposition and domain updates on rectilinear grids. It requires the
27 !! module mpp.F90, upon which it is built.\n
28 !! Scalable implementations of finite-difference codes are generally
29 !! based on decomposing the model domain into subdomains that are
30 !! distributed among processors. These domains will then be obliged to
31 !! exchange data at their boundaries if data dependencies are merely
32 !! nearest-neighbour, or may need to acquire information from the global
33 !! domain if there are extended data dependencies, as in the spectral
34 !! transform. The domain decomposition is a key operation in the
35 !! development of parallel codes.\n
37 !! mpp_domains_mod provides a domain decomposition and domain
38 !! update API for rectilinear grids, built on top of the mpp_mod API for message passing.
39 !! Features of mpp_domains_mod include:\n
41 !! Simple, minimal API, with free access to underlying API for more complicated stuff.\n
43 !! Design toward typical use in climate/weather CFD codes.\n
46 !! It is assumed that domain decomposition will mainly be in 2
47 !! horizontal dimensions, which will in general be the two
48 !! fastest-varying indices. There is a separate implementation of 1D
49 !! decomposition on the fastest-varying index, and 1D decomposition on
50 !! the second index, treated as a special case of 2D decomposition, is
51 !! also possible. We define domain as the grid associated with a <I>task</I>.
52 !! We define the compute domain as the set of gridpoints that are
53 !! computed by a task, and the data domain as the set of points
54 !! that are required by the task for the calculation. There can in
55 !! general be more than 1 task per PE, though often
56 !! the number of domains is the same as the processor count. We define
57 !! the global domain as the global computational domain of the
!! entire model (i.e., the same as the computational domain if run on a
59 !! single processor). 2D domains are defined using a derived type domain2D,
60 !! constructed as follows (see comments in code for more details).
62 !! type, public :: domain_axis_spec
64 !! integer :: begin, end, size, max_size
65 !! logical :: is_global
66 !! end type domain_axis_spec
68 !! type, public :: domain1D
70 !! type(domain_axis_spec) :: compute, data, global, active
71 !! logical :: mustputb, mustgetb, mustputf, mustgetf, folded
72 !! type(domain1D), pointer, dimension(:) :: list
73 !! integer :: pe ! pe to which the domain is assigned
77 !! type, public :: domain2D
79 !! type(domain1D) :: x
80 !! type(domain1D) :: y
81 !! type(domain2D), pointer, dimension(:) :: list
82 !! integer :: pe ! PE to which this domain is assigned
86 !! type(domain1D), public :: NULL_DOMAIN1D
87 !! type(domain2D), public :: NULL_DOMAIN2D
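!!
!! A minimal end-to-end usage sketch; the grid size, halo width and field name
!! below are illustrative assumptions, not values required by the API:
!!
!! use mpp_mod, only : mpp_init, mpp_npes
!! use mpp_domains_mod, only : domain2D, mpp_domains_init, mpp_define_layout, &
!! mpp_define_domains, mpp_get_compute_domain, &
!! mpp_get_data_domain, mpp_update_domains
!! type(domain2D) :: domain
!! integer :: layout(2), is, ie, js, je, isd, ied, jsd, jed
!! real, allocatable :: a(:,:)
!!
!! call mpp_init()
!! call mpp_domains_init()
!! call mpp_define_layout( (/1,100,1,100/), mpp_npes(), layout )
!! call mpp_define_domains( (/1,100,1,100/), layout, domain, xhalo=2, yhalo=2 )
!! call mpp_get_compute_domain( domain, is, ie, js, je )
!! call mpp_get_data_domain ( domain, isd, ied, jsd, jed )
!! allocate( a(isd:ied,jsd:jed) )
!! ! ... compute a on (is:ie,js:je), then fill the halo points:
!! call mpp_update_domains( a, domain )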
89 !> @addtogroup mpp_domains_mod
92 module mpp_domains_mod
94 #if defined(use_libMPI)
98 use mpp_parameter_mod, only : MPP_DEBUG, MPP_VERBOSE, MPP_DOMAIN_TIME
99 use mpp_parameter_mod, only : GLOBAL_DATA_DOMAIN, CYCLIC_GLOBAL_DOMAIN, GLOBAL,CYCLIC
100 use mpp_parameter_mod, only : AGRID, BGRID_SW, BGRID_NE, CGRID_NE, CGRID_SW, DGRID_NE, DGRID_SW
101 use mpp_parameter_mod, only : FOLD_WEST_EDGE, FOLD_EAST_EDGE, FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE
102 use mpp_parameter_mod, only : WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE
103 use mpp_parameter_mod, only : NON_BITWISE_EXACT_SUM, BITWISE_EXACT_SUM, MPP_DOMAIN_TIME
104 use mpp_parameter_mod, only : CENTER, CORNER, SCALAR_PAIR, SCALAR_BIT, BITWISE_EFP_SUM
105 use mpp_parameter_mod, only : NORTH, NORTH_EAST, EAST, SOUTH_EAST
106 use mpp_parameter_mod, only : SOUTH, SOUTH_WEST, WEST, NORTH_WEST
107 use mpp_parameter_mod, only : MAX_DOMAIN_FIELDS, NULL_PE, DOMAIN_ID_BASE
108 use mpp_parameter_mod, only : ZERO, NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY, MAX_TILES
109 use mpp_parameter_mod, only : EVENT_SEND, EVENT_RECV, ROOT_GLOBAL
110 use mpp_parameter_mod, only : NONBLOCK_UPDATE_TAG, EDGEONLY, EDGEUPDATE
111 use mpp_parameter_mod, only : NONSYMEDGE, NONSYMEDGEUPDATE
112 use mpp_data_mod, only : mpp_domains_stack, ptr_domains_stack
113 use mpp_data_mod, only : mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
114 use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_error, FATAL, WARNING, NOTE
115 use mpp_mod, only : stdout, stderr, stdlog, mpp_send, mpp_recv, mpp_transmit, mpp_sync_self
116 use mpp_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end
117 use mpp_mod, only : mpp_max, mpp_min, mpp_sum, mpp_get_current_pelist, mpp_broadcast
118 use mpp_mod, only : mpp_sum_ad
119 use mpp_mod, only : mpp_sync, mpp_init, lowercase
120 use mpp_mod, only : input_nml_file, mpp_alltoall
121 use mpp_mod, only : mpp_type, mpp_byte
122 use mpp_mod, only : mpp_type_create, mpp_type_free
123 use mpp_mod, only : COMM_TAG_1, COMM_TAG_2, COMM_TAG_3, COMM_TAG_4
124 use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist
125 use mpp_memutils_mod, only : mpp_memuse_begin, mpp_memuse_end
use mpp_efp_mod, only : mpp_reproducing_sum
use platform_mod, only : i4_kind, i8_kind
!--- public parameters imported from mpp_parameter_mod
132 public :: GLOBAL_DATA_DOMAIN, CYCLIC_GLOBAL_DOMAIN, BGRID_NE, BGRID_SW, CGRID_NE, CGRID_SW, AGRID
133 public :: DGRID_NE, DGRID_SW, FOLD_WEST_EDGE, FOLD_EAST_EDGE, FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE
134 public :: WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE
135 public :: NON_BITWISE_EXACT_SUM, BITWISE_EXACT_SUM, MPP_DOMAIN_TIME, BITWISE_EFP_SUM
136 public :: CENTER, CORNER, SCALAR_PAIR
137 public :: NORTH, NORTH_EAST, EAST, SOUTH_EAST
138 public :: SOUTH, SOUTH_WEST, WEST, NORTH_WEST
139 public :: ZERO, NINETY, MINUS_NINETY, ONE_HUNDRED_EIGHTY
140 public :: EDGEUPDATE, NONSYMEDGEUPDATE
142 !--- public data imported from mpp_data_mod
143 public :: NULL_DOMAIN1D, NULL_DOMAIN2D
145 public :: domain_axis_spec, domain1D, domain2D, DomainCommunicator2D
146 public :: nest_domain_type, mpp_group_update_type
148 !--- public interface from mpp_domains_util.h
149 public :: mpp_domains_set_stack_size, mpp_get_compute_domain, mpp_get_compute_domains
150 public :: mpp_get_data_domain, mpp_get_global_domain, mpp_get_domain_components
151 public :: mpp_get_layout, mpp_get_pelist, operator(.EQ.), operator(.NE.)
152 public :: mpp_domain_is_symmetry, mpp_domain_is_initialized
153 public :: mpp_get_neighbor_pe, mpp_nullify_domain_list
154 public :: mpp_set_compute_domain, mpp_set_data_domain, mpp_set_global_domain
155 public :: mpp_get_memory_domain, mpp_get_domain_shift, mpp_domain_is_tile_root_pe
156 public :: mpp_get_tile_id, mpp_get_domain_extents, mpp_get_current_ntile, mpp_get_ntile_count
157 public :: mpp_get_tile_list
158 public :: mpp_get_tile_npes, mpp_get_domain_root_pe, mpp_get_tile_pelist, mpp_get_tile_compute_domains
159 public :: mpp_get_num_overlap, mpp_get_overlap
160 public :: mpp_get_io_domain, mpp_get_domain_pe, mpp_get_domain_tile_root_pe
161 public :: mpp_get_domain_tile_commid, mpp_get_domain_commid
162 public :: mpp_get_domain_name, mpp_get_io_domain_layout
163 public :: mpp_copy_domain, mpp_set_domain_symmetry
164 public :: mpp_get_update_pelist, mpp_get_update_size
165 public :: mpp_get_domain_npes, mpp_get_domain_pelist
166 public :: mpp_clear_group_update
167 public :: mpp_group_update_initialized, mpp_group_update_is_set
168 public :: mpp_get_global_domains
169 public :: mpp_create_super_grid_domain
171 !--- public interface from mpp_domains_reduce.h
172 public :: mpp_global_field, mpp_global_max, mpp_global_min, mpp_global_sum
173 public :: mpp_global_sum_tl, mpp_global_sum_ad
174 !--- public interface from mpp_domains_misc.h
175 public :: mpp_broadcast_domain, mpp_domains_init, mpp_domains_exit, mpp_redistribute
176 public :: mpp_update_domains, mpp_check_field
177 public :: mpp_start_update_domains, mpp_complete_update_domains
178 public :: mpp_create_group_update, mpp_do_group_update
179 public :: mpp_start_group_update, mpp_complete_group_update
180 public :: mpp_reset_group_update_field
181 public :: mpp_update_nest_fine, mpp_update_nest_coarse
182 public :: mpp_get_boundary
183 public :: mpp_update_domains_ad
184 public :: mpp_get_boundary_ad
185 public :: mpp_pass_SG_to_UG, mpp_pass_UG_to_SG
186 !--- public interface from mpp_domains_define.h
187 public :: mpp_define_layout, mpp_define_domains, mpp_modify_domain, mpp_define_mosaic
188 public :: mpp_define_mosaic_pelist, mpp_define_null_domain, mpp_mosaic_defined
189 public :: mpp_define_io_domain, mpp_deallocate_domain
190 public :: mpp_compute_extent, mpp_compute_block_extent
192 !--- public interface for unstruct domain
193 public :: mpp_define_unstruct_domain, domainUG, mpp_get_UG_io_domain
194 public :: mpp_get_UG_domain_npes, mpp_get_UG_compute_domain, mpp_get_UG_domain_tile_id
195 public :: mpp_get_UG_domain_pelist, mpp_get_ug_domain_grid_index
196 public :: mpp_get_UG_domain_ntiles, mpp_get_UG_global_domain
197 public :: mpp_global_field_ug, mpp_get_ug_domain_tile_list, mpp_get_UG_compute_domains
198 public :: mpp_define_null_UG_domain, NULL_DOMAINUG, mpp_get_UG_domains_index
199 public :: mpp_get_UG_SG_domain, mpp_get_UG_domain_tile_pe_inf
201 !--- public interface from mpp_define_domains.inc
202 public :: mpp_define_nest_domains, mpp_get_C2F_index, mpp_get_F2C_index
203 public :: mpp_shift_nest_domains
204 public :: mpp_get_nest_coarse_domain, mpp_get_nest_fine_domain
205 public :: mpp_is_nest_coarse, mpp_is_nest_fine
206 public :: mpp_get_nest_pelist, mpp_get_nest_npes
207 public :: mpp_get_nest_fine_pelist, mpp_get_nest_fine_npes
211 public :: mpp_domain_UG_is_tile_root_pe
212 public :: mpp_deallocate_domainUG
213 public :: mpp_get_io_domain_UG_layout
216 integer, parameter :: NAME_LENGTH = 64
217 integer, parameter :: MAXLIST = 100
218 integer, parameter :: MAXOVERLAP = 200
219 integer, parameter :: FIELD_S = 0
220 integer, parameter :: FIELD_X = 1
221 integer, parameter :: FIELD_Y = 2
225 ! data types used by mpp_domains_mod
227 !> @brief Private type for axis specification data for an unstructured grid
228 !> @ingroup mpp_domains_mod
229 type :: unstruct_axis_spec
231 integer :: begin, end, size, max_size
232 integer :: begin_index, end_index
233 end type unstruct_axis_spec
235 !> Private type for axis specification data for an unstructured domain
236 !> @ingroup mpp_domains_mod
237 type :: unstruct_domain_spec
239 type(unstruct_axis_spec) :: compute
243 end type unstruct_domain_spec
246 !> @ingroup mpp_domains_mod
247 type :: unstruct_overlap_type
251 integer, pointer :: i(:)=>NULL()
252 integer, pointer :: j(:)=>NULL()
253 end type unstruct_overlap_type
256 !> @ingroup mpp_domains_mod
257 type :: unstruct_pass_type
259 integer :: nsend, nrecv
260 type(unstruct_overlap_type), pointer :: recv(:)=>NULL()
261 type(unstruct_overlap_type), pointer :: send(:)=>NULL()
262 end type unstruct_pass_type
264 !> Domain information for managing data on unstructured grids
265 !> @ingroup mpp_domains_mod
268 type(unstruct_axis_spec) :: compute, global !< axis specifications
269 type(unstruct_domain_spec), pointer :: list(:)=>NULL() !<
270 type(domainUG), pointer :: io_domain=>NULL() !<
271 type(unstruct_pass_type) :: SG2UG
272 type(unstruct_pass_type) :: UG2SG
273 integer, pointer :: grid_index(:) => NULL() !< index of grid on current pe
274 type(domain2d), pointer :: SG_domain => NULL()
279 integer :: tile_root_pe
281 integer :: npes_io_group
282 integer(i4_kind) :: io_layout
285 !> Used to specify index limits along an axis of a domain
286 !> @ingroup mpp_domains_mod
287 type :: domain_axis_spec
289 integer :: begin !< start of domain axis
290 integer :: end !< end of domain axis
291 integer :: size !< size of domain axis
292 integer :: max_size !< max size in set
293 logical :: is_global !< .true. if domain axis extent covers global domain
294 end type domain_axis_spec
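! A brief sketch of how these axis limits surface through the query API; the
! variable names are illustrative and "domain" is assumed to have been set up
! with mpp_define_domains:
!
! integer :: is, ie, js, je
! call mpp_get_compute_domain( domain, is, ie, js, je )
! ! for each axis, size = end - begin + 1, e.g. nx = ie - is + 1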
296 !> A private type used to specify index limits for a domain decomposition
297 !> @ingroup mpp_domains_mod
298 type :: domain1D_spec
300 type(domain_axis_spec) :: compute
301 type(domain_axis_spec) :: global
303 end type domain1D_spec
305 !> @brief Private type to specify multiple index limits and pe information for a 2D domain
306 !> @ingroup mpp_domains_mod
307 type :: domain2D_spec
309 type(domain1D_spec), pointer :: x(:) => NULL() !< x-direction domain decomposition
310 type(domain1D_spec), pointer :: y(:) => NULL() !< y-direction domain decomposition
311 integer, pointer :: tile_id(:) => NULL() !< tile id of each tile
312 integer :: pe !< PE to which this domain is assigned
313 integer :: pos !< position of this PE within link list
314 integer :: tile_root_pe !< root pe of tile.
315 end type domain2D_spec
317 !> Type for overlapping data
318 !> @ingroup mpp_domains_mod
integer :: count = 0 !< number of overlapping regions
integer :: start_pos !< start position in the buffer
integer :: totsize !< total size of all messages
325 integer , pointer :: msgsize(:) => NULL() !< overlapping msgsize to be sent or received
326 integer, pointer :: tileMe(:) => NULL() !< my tile id for this overlap
327 integer, pointer :: tileNbr(:) => NULL() !< neighbor tile id for this overlap
328 integer, pointer :: is(:) => NULL() !< starting i-index
329 integer, pointer :: ie(:) => NULL() !< ending i-index
330 integer, pointer :: js(:) => NULL() !< starting j-index
331 integer, pointer :: je(:) => NULL() !< ending j-index
332 integer, pointer :: dir(:) => NULL() !< direction ( value 1,2,3,4 = E,S,W,N)
333 integer, pointer :: rotation(:) => NULL() !< rotation angle.
334 integer, pointer :: index(:) => NULL() !< for refinement
335 logical, pointer :: from_contact(:) => NULL() !< indicate if the overlap is computed from
336 !! define_contact_overlap
337 end type overlap_type
339 !> Private type for overlap specifications
340 !> @ingroup mpp_domains_mod
343 integer :: whalo, ehalo, shalo, nhalo !< halo size
344 integer :: xbegin, xend, ybegin, yend
345 integer :: nsend, nrecv
346 integer :: sendsize, recvsize
347 type(overlap_type), pointer :: send(:) => NULL()
348 type(overlap_type), pointer :: recv(:) => NULL()
349 type(overlapSpec), pointer :: next => NULL()
352 !> @brief Upper and lower x and y bounds for a tile
353 !> @ingroup mpp_domains_mod
355 integer :: xbegin, xend, ybegin, yend
358 !> The domain2D type contains all the necessary information to
359 !! define the global, compute and data domains of each task, as well as the PE
360 !! associated with the task. The PEs from which remote data may be
361 !! acquired to update the data domain are also contained in a linked list of neighbours.
!! Domain types of higher rank can be constructed from type domain1D;
!! typically we only need 1D and 2D, but could need higher (e.g. 3D LES).
!! Some elements are repeated below if they are needed once per domain, not once per axis.
366 !> @ingroup mpp_domains_mod
character(len=NAME_LENGTH) :: name='unnamed' !< name of the domain; default is "unnamed"
370 integer(i8_kind) :: id
371 integer :: pe !< PE to which this domain is assigned
373 integer :: pos !< position of this PE within link list
374 logical :: symmetry !< indicate the domain is symmetric or non-symmetric.
375 integer :: whalo, ehalo !< halo size in x-direction
376 integer :: shalo, nhalo !< halo size in y-direction
377 integer :: ntiles !< number of tiles within mosaic
378 integer :: comm_id !< MPI communicator for the mosaic
379 integer :: tile_comm_id !< MPI communicator for this tile of domain
integer :: max_ntile_pe !< maximum number of tiles on any pe in this pelist.
integer :: ncontacts !< number of contact regions within mosaic.
logical :: rotated_ninety !< indicates whether any contact rotates NINETY or MINUS_NINETY
logical :: initialized=.FALSE. !< indicates whether the overlaps have been computed.
384 integer :: tile_root_pe !< root pe of current tile.
385 integer :: io_layout(2) !< io_layout, will be set through mpp_define_io_domain
386 !! default = domain layout
387 integer, pointer :: pearray(:,:) => NULL() !< pe of each layout position
388 integer, pointer :: tile_id(:) => NULL() !< tile id of each tile on current processor
389 integer, pointer :: tile_id_all(:)=> NULL() !< tile id of all the tiles of domain
390 type(domain1D), pointer :: x(:) => NULL() !< x-direction domain decomposition
391 type(domain1D), pointer :: y(:) => NULL() !< y-direction domain decomposition
392 type(domain2D_spec),pointer :: list(:) => NULL() !< domain decomposition on pe list
393 type(tile_type), pointer :: tileList(:) => NULL() !< store tile information
394 type(overlapSpec), pointer :: check_C => NULL() !< send and recv information for boundary
395 !! consistency check of C-cell
396 type(overlapSpec), pointer :: check_E => NULL() !< send and recv information for boundary
397 !! consistency check of E-cell
398 type(overlapSpec), pointer :: check_N => NULL() !< send and recv information for boundary
399 !! consistency check of N-cell
400 type(overlapSpec), pointer :: bound_C => NULL() !< send information for getting boundary
401 !! value for symmetry domain.
402 type(overlapSpec), pointer :: bound_E => NULL() !< send information for getting boundary
403 !! value for symmetry domain.
404 type(overlapSpec), pointer :: bound_N => NULL() !< send information for getting boundary
405 !! value for symmetry domain.
406 type(overlapSpec), pointer :: update_T => NULL() !< send and recv information for halo update of T-cell.
407 type(overlapSpec), pointer :: update_E => NULL() !< send and recv information for halo update of E-cell.
408 type(overlapSpec), pointer :: update_C => NULL() !< send and recv information for halo update of C-cell.
409 type(overlapSpec), pointer :: update_N => NULL() !< send and recv information for halo update of N-cell.
410 type(domain2d), pointer :: io_domain => NULL() !< domain for IO, will be set through calling
411 !! mpp_set_io_domain ( this will be changed).
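! A short sketch of how the io_layout/io_domain members above are typically
! populated and queried; the 2x2 io_layout value is an illustrative assumption:
!
! type(domain2D), pointer :: io_domain => NULL()
! call mpp_define_io_domain( domain, (/2,2/) ) ! group the compute PEs into 2x2 I/O groups
! io_domain => mpp_get_io_domain( domain ) ! pointer to the I/O domain just defined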
414 !> Type used to represent the contact between tiles.
415 !> @note This type will only be used in mpp_domains_define.inc
416 !> @ingroup mpp_domains_mod
417 type, private :: contact_type
419 integer :: ncontact !< number of neighbor tile.
420 integer, pointer :: tile(:) =>NULL() !< neighbor tile
421 integer, pointer :: align1(:)=>NULL(), align2(:)=>NULL() !< alignment of me and neighbor
real, pointer :: refine1(:)=>NULL(), refine2(:)=>NULL() !< refinement of me and neighbor
integer, pointer :: is1(:)=>NULL(), ie1(:)=>NULL() !< i-index of current tile representing contact
integer, pointer :: js1(:)=>NULL(), je1(:)=>NULL() !< j-index of current tile representing contact
integer, pointer :: is2(:)=>NULL(), ie2(:)=>NULL() !< i-index of neighbor tile representing contact
integer, pointer :: js2(:)=>NULL(), je2(:)=>NULL() !< j-index of neighbor tile representing contact
427 end type contact_type
429 !> index bounds for use in @ref nestSpec
430 !> @ingroup mpp_domains_mod
432 integer :: is_me, ie_me, js_me, je_me
433 integer :: is_you, ie_you, js_you, je_you
436 !> Used to specify bounds and index information for nested tiles as a linked list
437 !> @ingroup mpp_domains_mod
440 integer :: xbegin, xend, ybegin, yend
441 integer :: xbegin_c, xend_c, ybegin_c, yend_c
442 integer :: xbegin_f, xend_f, ybegin_f, yend_f
443 integer :: xsize_c, ysize_c
444 type(index_type) :: west, east, south, north, center
445 integer :: nsend, nrecv
446 integer :: extra_halo
447 type(overlap_type), pointer :: send(:) => NULL()
448 type(overlap_type), pointer :: recv(:) => NULL()
449 type(nestSpec), pointer :: next => NULL()
!> @brief Domain with nested fine and coarse tiles
454 !> @ingroup mpp_domains_mod
455 type :: nest_domain_type
456 character(len=NAME_LENGTH) :: name
458 integer, pointer :: nest_level(:) => NULL() !< Added for moving nest functionality
459 type(nest_level_type), pointer :: nest(:) => NULL()
461 integer, pointer :: tile_fine(:), tile_coarse(:)
462 integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
463 integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
464 end type nest_domain_type
466 !> Private type to hold data for each level of nesting
467 !> @ingroup mpp_domains_mod
468 type :: nest_level_type
471 logical :: is_fine, is_coarse
473 integer :: my_num_nest
474 integer, pointer :: my_nest_id(:)
475 integer, pointer :: tile_fine(:), tile_coarse(:)
476 integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
477 integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
478 integer :: x_refine, y_refine
479 logical :: is_fine_pe, is_coarse_pe
480 integer, pointer :: pelist(:) => NULL()
481 integer, pointer :: pelist_fine(:) => NULL()
482 integer, pointer :: pelist_coarse(:) => NULL()
483 type(nestSpec), pointer :: C2F_T => NULL()
484 type(nestSpec), pointer :: C2F_C => NULL()
485 type(nestSpec), pointer :: C2F_E => NULL()
486 type(nestSpec), pointer :: C2F_N => NULL()
487 type(nestSpec), pointer :: F2C_T => NULL()
488 type(nestSpec), pointer :: F2C_C => NULL()
489 type(nestSpec), pointer :: F2C_E => NULL()
490 type(nestSpec), pointer :: F2C_N => NULL()
491 type(domain2d), pointer :: domain_fine => NULL()
492 type(domain2d), pointer :: domain_coarse => NULL()
493 end type nest_level_type
496 !> Used for sending domain data between pe's
497 !> @ingroup mpp_domains_mod
498 type :: domaincommunicator2D
500 logical :: initialized=.false.
501 integer(i8_kind) :: id=-9999
502 integer(i8_kind) :: l_addr =-9999
503 integer(i8_kind) :: l_addrx =-9999
504 integer(i8_kind) :: l_addry =-9999
505 type(domain2D), pointer :: domain =>NULL()
506 type(domain2D), pointer :: domain_in =>NULL()
507 type(domain2D), pointer :: domain_out =>NULL()
508 type(overlapSpec), pointer :: send(:,:,:,:) => NULL()
509 type(overlapSpec), pointer :: recv(:,:,:,:) => NULL()
510 integer, dimension(:,:), allocatable :: sendis
511 integer, dimension(:,:), allocatable :: sendie
512 integer, dimension(:,:), allocatable :: sendjs
513 integer, dimension(:,:), allocatable :: sendje
514 integer, dimension(:,:), allocatable :: recvis
515 integer, dimension(:,:), allocatable :: recvie
516 integer, dimension(:,:), allocatable :: recvjs
517 integer, dimension(:,:), allocatable :: recvje
518 logical, dimension(:), allocatable :: S_do_buf
519 logical, dimension(:), allocatable :: R_do_buf
520 integer, dimension(:), allocatable :: cto_pe
521 integer, dimension(:), allocatable :: cfrom_pe
522 integer, dimension(:), allocatable :: S_msize
523 integer, dimension(:), allocatable :: R_msize
524 integer :: Slist_size=0, Rlist_size=0
525 integer :: isize=0, jsize=0, ke=0
526 integer :: isize_in=0, jsize_in=0
527 integer :: isize_out=0, jsize_out=0
528 integer :: isize_max=0, jsize_max=0
529 integer :: gf_ioff=0, gf_joff=0
531 integer, dimension(:) , allocatable :: isizeR
532 integer, dimension(:) , allocatable :: jsizeR
533 integer, dimension(:,:), allocatable :: sendisR
534 integer, dimension(:,:), allocatable :: sendjsR
535 integer(i8_kind), dimension(:), allocatable :: rem_addr
536 integer(i8_kind), dimension(:), allocatable :: rem_addrx
537 integer(i8_kind), dimension(:), allocatable :: rem_addry
538 integer(i8_kind), dimension(:,:), allocatable :: rem_addrl
539 integer(i8_kind), dimension(:,:), allocatable :: rem_addrlx
540 integer(i8_kind), dimension(:,:), allocatable :: rem_addrly
541 integer :: position !< data location. T, E, C, or N.
542 end type DomainCommunicator2D
544 integer, parameter :: MAX_REQUEST = 100
546 !> Used for nonblocking data transfer
547 !> @ingroup mpp_domains_mod
548 type :: nonblock_type
551 integer :: recv_msgsize
552 integer :: send_msgsize
553 integer :: update_flags
554 integer :: update_position
555 integer :: update_gridtype
556 integer :: update_whalo
557 integer :: update_ehalo
558 integer :: update_shalo
559 integer :: update_nhalo
560 integer :: request_send_count
561 integer :: request_recv_count
562 integer, dimension(MAX_REQUEST) :: request_send
563 integer, dimension(MAX_REQUEST) :: request_recv
564 integer, dimension(MAX_REQUEST) :: size_recv
565 integer, dimension(MAX_REQUEST) :: type_recv
566 integer, dimension(MAX_REQUEST) :: buffer_pos_send
567 integer, dimension(MAX_REQUEST) :: buffer_pos_recv
568 integer(i8_kind) :: field_addrs(MAX_DOMAIN_FIELDS)
569 integer(i8_kind) :: field_addrs2(MAX_DOMAIN_FIELDS)
571 end type nonblock_type
573 !> used for updates on a group
574 !> @ingroup mpp_domains_mod
575 type :: mpp_group_update_type
577 logical :: initialized = .FALSE.
578 logical :: k_loop_inside = .TRUE.
579 logical :: nonsym_edge = .FALSE.
580 integer :: nscalar = 0
581 integer :: nvector = 0
582 integer :: flags_s=0, flags_v=0
583 integer :: whalo_s=0, ehalo_s=0, shalo_s=0, nhalo_s=0
584 integer :: isize_s=0, jsize_s=0, ksize_s=1
585 integer :: whalo_v=0, ehalo_v=0, shalo_v=0, nhalo_v=0
586 integer :: isize_x=0, jsize_x=0, ksize_v=1
587 integer :: isize_y=0, jsize_y=0
588 integer :: position=0, gridtype=0
589 logical :: recv_s(8), recv_x(8), recv_y(8)
590 integer :: is_s=0, ie_s=0, js_s=0, je_s=0
591 integer :: is_x=0, ie_x=0, js_x=0, je_x=0
592 integer :: is_y=0, ie_y=0, js_y=0, je_y=0
593 integer :: nrecv=0, nsend=0
594 integer :: npack=0, nunpack=0
595 integer :: reset_index_s = 0
596 integer :: reset_index_v = 0
597 integer :: tot_msgsize = 0
598 integer :: from_pe(MAXOVERLAP)
599 integer :: to_pe(MAXOVERLAP)
600 integer :: recv_size(MAXOVERLAP)
601 integer :: send_size(MAXOVERLAP)
602 integer :: buffer_pos_recv(MAXOVERLAP)
603 integer :: buffer_pos_send(MAXOVERLAP)
604 integer :: pack_type(MAXOVERLAP)
605 integer :: pack_buffer_pos(MAXOVERLAP)
606 integer :: pack_rotation(MAXOVERLAP)
607 integer :: pack_size(MAXOVERLAP)
608 integer :: pack_is(MAXOVERLAP)
609 integer :: pack_ie(MAXOVERLAP)
610 integer :: pack_js(MAXOVERLAP)
611 integer :: pack_je(MAXOVERLAP)
612 integer :: unpack_type(MAXOVERLAP)
613 integer :: unpack_buffer_pos(MAXOVERLAP)
614 integer :: unpack_rotation(MAXOVERLAP)
615 integer :: unpack_size(MAXOVERLAP)
616 integer :: unpack_is(MAXOVERLAP)
617 integer :: unpack_ie(MAXOVERLAP)
618 integer :: unpack_js(MAXOVERLAP)
619 integer :: unpack_je(MAXOVERLAP)
620 integer(i8_kind) :: addrs_s(MAX_DOMAIN_FIELDS)
621 integer(i8_kind) :: addrs_x(MAX_DOMAIN_FIELDS)
622 integer(i8_kind) :: addrs_y(MAX_DOMAIN_FIELDS)
623 integer :: buffer_start_pos = -1
624 integer :: request_send(MAX_REQUEST)
625 integer :: request_recv(MAX_REQUEST)
626 integer :: type_recv(MAX_REQUEST)
627 end type mpp_group_update_type
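! A brief sketch of building a group update with the public constructor declared
! later in this module; field names and the C-grid choice are illustrative
! assumptions, and the start/complete calls are omitted here (see
! mpp_start_group_update/mpp_complete_group_update below):
!
! type(mpp_group_update_type) :: group
! call mpp_create_group_update( group, a, domain )
! call mpp_create_group_update( group, u, v, domain, gridtype=CGRID_NE )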
!> One-dimensional domain used to manage shared data access between PEs
630 !> @ingroup mpp_domains_mod
633 type(domain_axis_spec) :: compute !< index limits for compute domain
634 type(domain_axis_spec) :: domain_data !< index limits for data domain
635 type(domain_axis_spec) :: global !< index limits for global domain
636 type(domain_axis_spec) :: memory !< index limits for memory domain
637 logical :: cyclic !< true if domain is cyclic
638 type(domain1D), pointer :: list(:) =>NULL() !< list of each pe's domains
integer :: pe !< PE to which this domain is assigned
640 integer :: pos !< position of this PE within link list, i.e domain%list(pos)%pe = pe
641 integer :: goffset !< needed for global sum
642 integer :: loffset !< needed for global sum
645 !#######################################################################
647 !> @addtogroup mpp_domains_mod
649 !***********************************************************************
653 !***********************************************************************
655 logical :: module_is_initialized = .false.
656 logical :: debug = .FALSE.
657 logical :: verbose=.FALSE.
658 logical :: mosaic_defined = .false.
659 integer :: mpp_domains_stack_size=0
660 integer :: mpp_domains_stack_hwm=0
661 type(domain1D),save :: NULL_DOMAIN1D
662 type(domain2D),save :: NULL_DOMAIN2D
663 type(domainUG),save :: NULL_DOMAINUG
664 integer :: current_id_update = 0
665 integer :: num_update = 0
666 integer :: num_nonblock_group_update = 0
667 integer :: nonblock_buffer_pos = 0
668 integer :: nonblock_group_buffer_pos = 0
669 logical :: start_update = .true.
670 logical :: complete_update = .false.
671 type(nonblock_type), allocatable :: nonblock_data(:)
672 integer, parameter :: MAX_NONBLOCK_UPDATE = 100
674 integer :: group_update_buffer_pos = 0
675 logical :: complete_group_update_on = .false.
676 !-------- The following variables are used in mpp_domains_comm.h
678 integer, parameter :: MAX_ADDRS=512
679 integer(i8_kind),dimension(MAX_ADDRS),save :: addrs_sorted=-9999 !< list of sorted local addresses
680 integer, dimension(-1:MAX_ADDRS),save :: addrs_idx=-9999 !< index of address associated with d_comm
681 integer, save :: a_sort_len=0 !< length sorted memory list
682 integer, save :: n_addrs=0 !< number of memory addresses used
684 integer(i8_kind), parameter :: ADDR2_BASE = 65536_i8_kind !< = 0x0000000000010000
685 integer, parameter :: MAX_ADDRS2=128
686 integer(i8_kind),dimension(MAX_ADDRS2),save :: addrs2_sorted=-9999 !< list of sorted local addresses
687 integer, dimension(-1:MAX_ADDRS2),save :: addrs2_idx=-9999 !< index of addr2 associated with d_comm
688 integer, save :: a2_sort_len=0 !< length sorted memory list
689 integer, save :: n_addrs2=0 !< number of memory addresses used
691 integer, parameter :: MAX_DOM_IDS=128
692 integer(i8_kind),dimension(MAX_DOM_IDS),save :: ids_sorted=-9999 !< list of sorted domain identifiers
integer, dimension(-1:MAX_DOM_IDS),save :: ids_idx=-9999 !< index of d_comm associated with sorted addresses
694 integer, save :: i_sort_len=0 !< length sorted domain ids list
695 integer, save :: n_ids=0 !< number of domain ids used
696 !!(=i_sort_len; domain ids are never removed)
698 integer, parameter :: MAX_FIELDS=1024
699 integer(i8_kind), dimension(MAX_FIELDS),save :: dcKey_sorted=-9999 !< list of sorted local addresses
700 ! Not sure why static d_comm fails during deallocation of derived type members; allocatable works
701 ! type(DomainCommunicator2D),dimension(MAX_FIELDS),save,target :: d_comm !< domain communicators
702 type(DomainCommunicator2D),dimension(:),allocatable,save,target :: d_comm !< domain communicators
703 integer, dimension(-1:MAX_FIELDS),save :: d_comm_idx=-9999 !< index of
704 !! d_comm associated with sorted addresses
705 integer, save :: dc_sort_len=0 !< length sorted comm keys
706 !! (=num active communicators)
707 integer, save :: n_comm=0 !< number of communicators used
709 ! integer(i8_kind), parameter :: GT_BASE=2**8
710 integer(i8_kind), parameter :: GT_BASE = 256_i8_kind !0x0000000000000100
712 ! integer(i8_kind), parameter :: KE_BASE=2**48
713 integer(i8_kind), parameter :: KE_BASE = 281474976710656_i8_kind !0x0001000000000000
715 integer(i8_kind) :: domain_cnt=0
717 !--- the following variables are used in mpp_domains_misc.h
718 logical :: domain_clocks_on=.FALSE.
719 integer :: send_clock=0, recv_clock=0, unpk_clock=0
720 integer :: wait_clock=0, pack_clock=0
721 integer :: send_pack_clock_nonblock=0, recv_clock_nonblock=0, unpk_clock_nonblock=0
722 integer :: wait_clock_nonblock=0
723 integer :: nest_send_clock=0, nest_recv_clock=0, nest_unpk_clock=0
724 integer :: nest_wait_clock=0, nest_pack_clock=0
725 integer :: group_recv_clock=0, group_send_clock=0, group_pack_clock=0, group_unpk_clock=0, group_wait_clock=0
726 integer :: nonblock_group_recv_clock=0, nonblock_group_send_clock=0, nonblock_group_pack_clock=0
727 integer :: nonblock_group_unpk_clock=0, nonblock_group_wait_clock=0
729 !> namelist interface
character(len=32) :: debug_update_domain = "none" !< When debug_update_domain = "none", no debug checking is done.
!! When debug_update_domain is set to "fatal",
!! the run will exit with a fatal error message.
!! When debug_update_domain is set to "warning",
!! the run will output a warning message.
!! When debug_update_domain is set to "note",
!! the run will output a note message.
logical :: debug_message_passing = .false. !< If .true., checks the consistency on the boundary between
!! processors/tiles when updating domains for symmetric domains, and
!! checks the consistency on the north folded edge.
integer :: nthread_control_loop = 8 !< Determines the loop order for packing and unpacking.
!! When the number of threads is greater than nthread_control_loop,
!! the k-loop is moved outside and combined with the number
!! of packs and unpacks. When the number of threads is
!! less than or equal to nthread_control_loop, the k-loop
!! is moved inside, but still outside of, the j,i loops.
746 logical :: efp_sum_overflow_check = .false. !< If .true., always do overflow_check
747 !! when doing EFP bitwise mpp_global_sum.
748 logical :: use_alltoallw = .false.
749 namelist /mpp_domains_nml/ debug_update_domain, domain_clocks_on, debug_message_passing, nthread_control_loop, &
750 efp_sum_overflow_check, use_alltoallw
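! An example entry for this namelist in an input.nml file; the values shown are
! simply the defaults declared above:
!
! &mpp_domains_nml
! debug_update_domain = "none"
! domain_clocks_on = .false.
! debug_message_passing = .false.
! nthread_control_loop = 8
! efp_sum_overflow_check = .false.
! use_alltoallw = .false.
! /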
752 !***********************************************************************
754 integer, parameter :: NO_CHECK = -1
755 integer :: debug_update_level = NO_CHECK
757 !***********************************************************************
759 ! public interface from mpp_domains_define.h
761 !***********************************************************************
762 !> @brief Retrieve layout associated with a domain decomposition.
763 !> Given a global 2D domain and the number of divisions in the
764 !! decomposition ndivs (usually the PE count unless some
!! domains are \e masked) this call returns a 2D domain layout.
766 !! By default, mpp_define_layout will attempt to divide the
767 !! 2D index space into domains that maintain the aspect ratio of the
768 !! global domain. If this cannot be done, the algorithm favours domains
769 !! that are longer in \e x than \e y, a preference that could improve vector performance.
770 !! <br>Example usage:
771 !! @code{.F90}call mpp_define_layout( global_indices, ndivs, layout )@endcode
772 !> @ingroup mpp_domains_mod
773 interface mpp_define_layout
774 module procedure mpp_define_layout2D
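! A brief sketch of the typical pairing of mpp_define_layout with
! mpp_define_domains; the global grid size and halo width are illustrative
! assumptions:
!
! integer :: layout(2)
! type(domain2D) :: domain
! call mpp_define_layout( (/1,360,1,180/), mpp_npes(), layout )
! call mpp_define_domains( (/1,360,1,180/), layout, domain, xhalo=1, yhalo=1 )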
777 !> @brief Set up a domain decomposition.
779 !> There are two forms for the \e mpp_define_domains call. The 2D version is generally
780 !! to be used but is built by repeated calls to the 1D version, also provided.
782 !! <br>Example usage:
784 !! call mpp_define_domains( global_indices, ndivs, domain, &
785 !! pelist, flags, halo, extent, maskmap )
786 !! call mpp_define_domains( global_indices, layout, domain, pelist, &
787 !! xflags, yflags, xhalo, yhalo, &
788 !! xextent, yextent, maskmap, name )
790 !! @param global_indices Defines the global domain.
791 !! @param ndivs The number of domain divisions required.
792 !! @param [inout] domain Holds the resulting domain decomposition.
793 !! @param pelist List of PEs to which the domains are to be assigned.
794 !! @param flags An optional flag to pass additional information
795 !! about the desired domain topology. Useful flags in a 1D decomposition
796 !! include <TT>GLOBAL_DATA_DOMAIN</TT> and
797 !! <TT>CYCLIC_GLOBAL_DOMAIN</TT>. Flags are integers: multiple flags may
798 !! be added together. The flag values are public parameters available by
800 !! @param halo Width of the halo.
801 !! @param extent Normally <TT>mpp_define_domains</TT> attempts
802 !! an even division of the global domain across <TT>ndivs</TT>
803 !! domains. The <TT>extent</TT> array can be used by the user to pass a
804 !! custom domain division. The <TT>extent</TT> array has <TT>ndivs</TT>
805 !! elements and holds the compute domain widths, which should add up to
806 !! cover the global domain exactly.
807 !! @param maskmap Some divisions may be masked
808 !! (<TT>maskmap=.FALSE.</TT>) to exclude them from the computation (e.g
809 !! for ocean model domains that are all land). The <TT>maskmap</TT> array
810 !! is dimensioned <TT>ndivs</TT> and contains <TT>.TRUE.</TT> values for
811 !! any domain that must be <I>included</I> in the computation (default
812 !! all). The <TT>pelist</TT> array length should match the number of
813 !! domains included in the computation.
815 !! <br>Example usage:
817 !! call mpp_define_domains( (/1,100/), 10, domain, &
818 !! flags=GLOBAL_DATA_DOMAIN+CYCLIC_GLOBAL_DOMAIN, halo=2 )
821 !! defines 10 compute domains spanning the range [1,100] of the global
822 !! domain. The compute domains are non-overlapping blocks of 10. All the data
823 !! domains are global, and with a halo of 2 span the range [-1:102]. And
824 !! since the global domain has been declared to be cyclic,
825 !! domain(9)%next => domain(0) and domain(0)%prev =>
826 !! domain(9). A field is allocated on the data domain, and computations proceed on
827 !! the compute domain. A call to mpp_update_domains would fill in the values
828 !! in the halo region:<br>
831 !! call mpp_get_data_domain( domain, isd, ied ) !returns -1 and 102
832 !! call mpp_get_compute_domain( domain, is, ie ) !returns (1,10) on PE 0 ...
833 !! allocate( a(isd:ied) )
835 !! a(i) = <perform computations>
837 !! call mpp_update_domains( a, domain )
!! The call to mpp_update_domains fills in the regions outside
841 !! the compute domain. Since the global domain is cyclic, the values at
842 !! \e i=(-1,0) are the same as at \e i=(99,100); and \e i=(101,102)
843 !! are the same as \e i=(1,2).
845 !! The 2D version is just an extension of this syntax to two dimensions.
847 !! The 2D version of the above should generally be used in
848 !! codes, including 1D-decomposed ones, if there is a possibility of
849 !! future evolution toward 2D decomposition. The arguments are similar to
850 !! the 1D case, except that now we have optional arguments
851 !! flags, halo, extent and maskmap along two axes.
853 !! flags can now take an additional possible value to fold one or more edges.
854 !! This is done by using flags \e FOLD_WEST_EDGE, \e FOLD_EAST_EDGE, \e FOLD_SOUTH_EDGE or
855 !! \e FOLD_NORTH_EDGE. When a fold exists (e.g cylindrical domain),
856 !! vector fields reverse sign upon
857 !! crossing the fold. This parity reversal is performed only in the vector version of
858 !! mpp_update_domains. In addition, shift operations may need to be applied to vector fields on
859 !! staggered grids, also described in the vector interface to mpp_update_domains.
861 !! <TT>name</TT> is the name associated with the decomposition,
862 !! e.g <TT>'Ocean model'</TT>. If this argument is present,
863 !! <TT>mpp_define_domains</TT> will print the domain decomposition
864 !! generated to <TT>stdlog</TT>.
867 !! call mpp_define_domains( (/1,100,1,100/), (/2,2/), domain, xhalo=1 )
868 !! will create the following domain layout:<br>
870 !! | domain |domain(1)|domain(2) |domain(3) |domain(4) |
871 !! |--------------|---------|-----------|-----------|-------------|
872 !! |Compute domain|1,50,1,50|51,100,1,50|1,50,51,100|51,100,51,100|
873 !! |Data domain |0,51,1,50|50,101,1,50|0,51,51,100|50,101,51,100|
876 !! Again, we allocate arrays on the data domain, perform computations
877 !! on the compute domain, and call mpp_update_domains to update the halo region.
!! If we wished to perform a 1D decomposition along Y on the same global domain,
!! call mpp_define_domains( (/1,100,1,100/), layout=(/1,4/), domain, xhalo=1 )
884 !! This will create the following domain layout:<br>
886 !! | domain |domain(1) |domain(2) |domain(3) |domain(4) |
887 !! |--------------|----------|-----------|-----------|------------|
888 !! |Compute domain|1,100,1,25|1,100,26,50|1,100,51,75|1,100,76,100|
!! |Data domain |0,101,1,25|0,101,26,50|0,101,51,75|0,101,76,100|
890 !> @ingroup mpp_domains_mod
891 interface mpp_define_domains
892 module procedure mpp_define_domains1D
893 module procedure mpp_define_domains2D
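! A self-contained sketch of the 2D example given above (a 100x100 global domain
! decomposed over a 2x2 layout with xhalo=1); the array and domain names are
! illustrative assumptions:
!
! type(domain2D) :: domain
! integer :: isd, ied, jsd, jed
! real, allocatable :: a(:,:)
! call mpp_define_domains( (/1,100,1,100/), (/2,2/), domain, xhalo=1, name='test domain' )
! call mpp_get_data_domain( domain, isd, ied, jsd, jed )
! allocate( a(isd:ied,jsd:jed) )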
896 !> Defines a nullified 1D or 2D domain
898 !> <br> Example usage:
900 !! call mpp_define_null_domain(domain)
902 !> @ingroup mpp_domains_mod
903 interface mpp_define_null_domain
904 module procedure mpp_define_null_domain1D
905 module procedure mpp_define_null_domain2D
908 !> Copy 1D or 2D domain
909 !> @param domain_in Input domain to get read
910 !> @param domain_out Output domain to get written to
911 !> @ingroup mpp_domains_mod
912 interface mpp_copy_domain
913 module procedure mpp_copy_domain1D
914 module procedure mpp_copy_domain2D
915 end interface mpp_copy_domain
916 !> Deallocate given 1D or 2D domain
917 !> @param domain an allocated @ref domain1D or @ref domain2D
918 !> @ingroup mpp_domains_mod
919 interface mpp_deallocate_domain
920 module procedure mpp_deallocate_domain1D
921 module procedure mpp_deallocate_domain2D
924 !> Modifies the extents (compute, data and global) of a given domain
925 !> @ingroup mpp_domains_mod
926 interface mpp_modify_domain
927 module procedure mpp_modify_domain1D
928 module procedure mpp_modify_domain2D
932 !***********************************************************************
934 ! public interface from mpp_domains_misc.h
936 !***********************************************************************
938 !> Performs halo updates for a given domain.<br>
940 !! Used to perform a halo update of a
941 !! domain-decomposed array on each PE. \e MPP_TYPE can be of type
942 !! complex, integer, logical or real of 4-byte or 8-byte kind; of rank up to 5.
943 !! The vector version (with two input data fields) is only present for real types.
944 !! For 2D domain updates, if there are halos present along both
945 !! x and y, we can choose to update one only, by specifying \e flags=XUPDATE or \e flags=YUPDATE.
946 !! In addition, one-sided updates can be performed by setting flags
947 !! to any combination of WUPDATE, EUPDATE, SUPDATE and NUPDATE
!! to update the west, east, south and north halos respectively.
949 !! Any combination of halos may be used by adding the requisite flags, e.g:
950 !! \e flags=XUPDATE+SUPDATE or \e flags=EUPDATE+WUPDATE+SUPDATE will update the east,
951 !! west and south halos.<br>
953 !! If a call to \e mpp_update_domains involves at least one E-W
!! halo and one N-S halo, the corners involved will also be updated, i.e.,
955 !! in the example above, the SE and SW corners will be updated.<br>
956 !! If \e flags is not supplied, that is equivalent to \e flags=XUPDATE+YUPDATE.<br>
958 !! The vector version is passed the \e x and \e y components of a vector field in tandem,
959 !! and both are updated upon return. They are passed together to treat parity issues on various
960 !! grids. For example, on a cubic sphere projection, the \e x \e y components may be
961 !! interchanged when passing from an equatorial cube face to a polar face.
962 !! For grids with folds, vector components change sign on crossing the fold. Paired scalar
963 !! quantities can also be passed with the vector version if \e flags=SCALAR_PAIR, in which
964 !! case components are appropriately interchanged, but signs are not.<br>
966 !! Special treatment at boundaries such as folds is also required for
!! staggered grids. The following types of staggered grids are supported:<br>
970 !! 1) AGRID: values are at grid centers.<br>
971 !! 2) BGRID_NE: vector fields are at the NE vertex of a grid
!! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
!! actually at (i+½,j+½) with respect to the grid centers.<br>
!! 3) BGRID_SW: vector fields are at the SW vertex of a grid
!! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
!! actually at (i-½,j-½) with respect to the grid centers.<br>
!! 4) CGRID_NE: vector fields are at the N and E faces of a
!! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
!! are actually at (i+½,j) and (i,j+½) with respect to the
!! grid centers.<br>
!! 5) CGRID_SW: vector fields are at the S and W faces of a
!! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
!! are actually at (i-½,j) and (i,j-½) with respect to the
!! grid centers.<br>
986 !! The gridtypes listed above are all available by use association as
987 !! integer parameters. The scalar version of \e mpp_update_domains
988 !! assumes that the values of a scalar field are always at \e AGRID
989 !! locations, and no special boundary treatment is required. If vector
990 !! fields are at staggered locations, the optional argument
991 !! \e gridtype must be appropriately set for correct treatment at
994 !! It is safe to apply vector field updates to the appropriate arrays
995 !! irrespective of the domain topology: if the topology requires no
!! special treatment of vector fields, specifying \e gridtype will
!! do no harm.<br>
!! \e mpp_update_domains internally buffers the data being sent
!! and received into single messages for efficiency. A tunable internal
!! buffer area in memory is provided for this purpose by
!! \e mpp_domains_mod. The size of this buffer area can be set by
!! the user by calling \e mpp_domains_set_stack_size.
!! call mpp_update_domains( field, domain, flags )
!! updates the halo of a scalar field on the given 2D domain.
!! call mpp_update_domains( fieldx, fieldy, domain, flags, gridtype )
!! updates the halos of the paired vector components on the given 2D domain.
1011 !> @ingroup mpp_domains_mod
1012 interface mpp_update_domains
1013 module procedure mpp_update_domain2D_r8_2d
1014 module procedure mpp_update_domain2D_r8_3d
1015 module procedure mpp_update_domain2D_r8_4d
1016 module procedure mpp_update_domain2D_r8_5d
1017 module procedure mpp_update_domain2D_r8_2dv
1018 module procedure mpp_update_domain2D_r8_3dv
1019 module procedure mpp_update_domain2D_r8_4dv
1020 module procedure mpp_update_domain2D_r8_5dv
1022 module procedure mpp_update_domain2D_c8_2d
1023 module procedure mpp_update_domain2D_c8_3d
1024 module procedure mpp_update_domain2D_c8_4d
1025 module procedure mpp_update_domain2D_c8_5d
1027 module procedure mpp_update_domain2D_i8_2d
1028 module procedure mpp_update_domain2D_i8_3d
1029 module procedure mpp_update_domain2D_i8_4d
1030 module procedure mpp_update_domain2D_i8_5d
1031 module procedure mpp_update_domain2D_r4_2d
1032 module procedure mpp_update_domain2D_r4_3d
1033 module procedure mpp_update_domain2D_r4_4d
1034 module procedure mpp_update_domain2D_r4_5d
1035 module procedure mpp_update_domain2D_r4_2dv
1036 module procedure mpp_update_domain2D_r4_3dv
1037 module procedure mpp_update_domain2D_r4_4dv
1038 module procedure mpp_update_domain2D_r4_5dv
1040 module procedure mpp_update_domain2D_c4_2d
1041 module procedure mpp_update_domain2D_c4_3d
1042 module procedure mpp_update_domain2D_c4_4d
1043 module procedure mpp_update_domain2D_c4_5d
1045 module procedure mpp_update_domain2D_i4_2d
1046 module procedure mpp_update_domain2D_i4_3d
1047 module procedure mpp_update_domain2D_i4_4d
1048 module procedure mpp_update_domain2D_i4_5d
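! Brief usage sketches for the scalar and vector forms documented above; the
! field names and the CGRID_NE choice are illustrative assumptions:
!
! call mpp_update_domains( a, domain ) ! update both x and y halos
! call mpp_update_domains( a, domain, flags=EUPDATE+WUPDATE+SUPDATE ) ! east, west and south halos only
! call mpp_update_domains( u, v, domain, gridtype=CGRID_NE ) ! paired vector components on a C-grid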
1051 !> Interface to start halo updates
1052 !! \e mpp_start_update_domains is used to start a halo update of a
1053 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1054 !! \e complex, \e integer, \e logical or \e real;
1055 !! of 4-byte or 8-byte kind; of rank up to 5. The vector version (with
!! two input data fields) is only present for \e real types.<br>
!!
!! \e mpp_start_update_domains must be paired with
!! \e mpp_complete_update_domains. In \e mpp_start_update_domains,
!! a buffer will be pre-posted to receive (non-blocking) the
!! data, and the data on the computational domain will be packed and sent (non-blocking send)
!! to other processors. In \e mpp_complete_update_domains, the buffer will
!! be unpacked to fill the halo, and mpp_sync_self will be called
!! to ensure the communication is complete at the last call of mpp_complete_update_domains.<br>
!! Each mpp_update_domains can be replaced by the combination of mpp_start_update_domains
!! and mpp_complete_update_domains. The arguments in mpp_start_update_domains
!! and mpp_complete_update_domains should be exactly the same as in the
!! mpp_update_domains call being replaced, except that the optional argument "complete" is omitted.
1070 !! The following are examples on how to replace mpp_update_domains with
1071 !! mpp_start_update_domains/mpp_complete_update_domains
1073 !> @par Example 1: Replace one scalar mpp_update_domains.<br>
1077 !! call mpp_update_domains(data, domain, flags=update_flags)<br>
1081 !! id_update = mpp_start_update_domains(data, domain, flags=update_flags)<br>
1082 !! ...( doing some computation )<br>
1083 !! call mpp_complete_update_domains(id_update, data, domain, flags=update_flags)<br>
1085 !> @par Example 2: Replace group scalar mpp_update_domains<br>
1089 !! call mpp_update_domains(data_1, domain, flags=update_flags, complete=.false.)<br>
1090 !! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
1091 !! call mpp_update_domains(data_n, domain, flags=update_flags, complete=.true. )<br>
1095 !! id_up_1 = mpp_start_update_domains(data_1, domain, flags=update_flags)<br>
1096 !! .... ( other n-2 call mpp_start_update_domains )<br>
1097 !! id_up_n = mpp_start_update_domains(data_n, domain, flags=update_flags)<br>
1099 !! ..... ( doing some computation )<br>
1101 !! call mpp_complete_update_domains(id_up_1, data_1, domain, flags=update_flags)<br>
1102 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1103 !! call mpp_complete_update_domains(id_up_n, data_n, domain, flags=update_flags)<br>
1105 !> @par Example 3: Replace group CGRID_NE vector, mpp_update_domains<br>
!! call mpp_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE, complete=.false.)<br>
!! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
!! call mpp_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE, complete=.true. )<br>
1115 !! id_up_1 = mpp_start_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1116 !! .... ( other n-2 call mpp_start_update_domains )<br>
1117 !! id_up_n = mpp_start_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1119 !! ..... ( doing some computation )<br>
1121 !! call mpp_complete_update_domains(id_up_1, u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1122 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1123 !! call mpp_complete_update_domains(id_up_n, u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1125 !! For 2D domain updates, if there are halos present along both
1126 !! \e x and \e y, we can choose to update one only, by
1127 !! specifying \e flags=XUPDATE or \e flags=YUPDATE. In
1128 !! addition, one-sided updates can be performed by setting \e flags
1129 !! to any combination of \e WUPDATE, \e EUPDATE,
!! \e SUPDATE and \e NUPDATE, to update the west, east, south
!! and north halos respectively. Any combination of halos may be used by
1132 !! adding the requisite flags, e.g: \e flags=XUPDATE+SUPDATE or
1133 !! \e flags=EUPDATE+WUPDATE+SUPDATE will update the east, west and
1136 !! If a call to \e mpp_start_update_domains/mpp_complete_update_domains involves at least one E-W
!! halo and one N-S halo, the corners involved will also be updated, i.e.,
1138 !! in the example above, the SE and SW corners will be updated.<br>
1140 !! If \e flags is not supplied, that is
1141 !! equivalent to \e flags=XUPDATE+YUPDATE.<br>
1143 !! The vector version is passed the \e x and \e y
1144 !! components of a vector field in tandem, and both are updated upon
1145 !! return. They are passed together to treat parity issues on various
1146 !! grids. For example, on a cubic sphere projection, the \e x and
1147 !! \e y components may be interchanged when passing from an
1148 !! equatorial cube face to a polar face. For grids with folds, vector
1149 !! components change sign on crossing the fold. Paired scalar quantities
1150 !! can also be passed with the vector version if flags=SCALAR_PAIR, in which
1151 !! case components are appropriately interchanged, but signs are not.<br>
1153 !! Special treatment at boundaries such as folds is also required for
!! staggered grids. The following types of staggered grids are supported:<br>
1157 !! 1) \e AGRID: values are at grid centers.<br>
1158 !! 2) \e BGRID_NE: vector fields are at the NE vertex of a grid
1159 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
1160 !! actually at (i+½,j+½) with respect to the grid centers.<br>
1161 !! 3) \e BGRID_SW: vector fields are at the SW vertex of a grid
1162 !! cell, i.e., the array elements \e u(i,j) and \e v(i,j) are
1163 !! actually at (i-½,j-½) with respect to the grid centers.<br>
1164 !! 4) \e CGRID_NE: vector fields are at the N and E faces of a
1165 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1166 !! are actually at (i+½,j) and (i,j+½) with respect to the
1167 !! grid centers.<br>
1168 !! 5) \e CGRID_SW: vector fields are at the S and W faces of a
1169 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1170 !! are actually at (i-½,j) and (i,j-½) with respect to the
1171 !! grid centers.<br>
1173 !! The gridtypes listed above are all available by use association as
1174 !! integer parameters. If vector fields are at staggered locations, the
1175 !! optional argument \e gridtype must be appropriately set for
1176 !! correct treatment at boundaries.
1178 !! It is safe to apply vector field updates to the appropriate arrays
1179 !! irrespective of the domain topology: if the topology requires no
!! special treatment of vector fields, specifying \e gridtype will
!! do no harm.<br>
1183 !! \e mpp_start_update_domains/mpp_complete_update_domains internally
1184 !! buffers the data being sent and received into single messages for efficiency.
!! A tunable internal buffer area in memory is provided for this purpose by
1186 !! \e mpp_domains_mod. The size of this buffer area can be set by
1187 !! the user by calling \e mpp_domains_set_stack_size.<br>
1190 !! call mpp_start_update_domains( field, domain, flags )
1191 !! call mpp_complete_update_domains( field, domain, flags )
1192 !> @ingroup mpp_domains_mod
1193 interface mpp_start_update_domains
1194 module procedure mpp_start_update_domain2D_r8_2d
1195 module procedure mpp_start_update_domain2D_r8_3d
1196 module procedure mpp_start_update_domain2D_r8_4d
1197 module procedure mpp_start_update_domain2D_r8_5d
1198 module procedure mpp_start_update_domain2D_r8_2dv
1199 module procedure mpp_start_update_domain2D_r8_3dv
1200 module procedure mpp_start_update_domain2D_r8_4dv
1201 module procedure mpp_start_update_domain2D_r8_5dv
1203 module procedure mpp_start_update_domain2D_c8_2d
1204 module procedure mpp_start_update_domain2D_c8_3d
1205 module procedure mpp_start_update_domain2D_c8_4d
1206 module procedure mpp_start_update_domain2D_c8_5d
1208 module procedure mpp_start_update_domain2D_i8_2d
1209 module procedure mpp_start_update_domain2D_i8_3d
1210 module procedure mpp_start_update_domain2D_i8_4d
1211 module procedure mpp_start_update_domain2D_i8_5d
1212 module procedure mpp_start_update_domain2D_r4_2d
1213 module procedure mpp_start_update_domain2D_r4_3d
1214 module procedure mpp_start_update_domain2D_r4_4d
1215 module procedure mpp_start_update_domain2D_r4_5d
1216 module procedure mpp_start_update_domain2D_r4_2dv
1217 module procedure mpp_start_update_domain2D_r4_3dv
1218 module procedure mpp_start_update_domain2D_r4_4dv
1219 module procedure mpp_start_update_domain2D_r4_5dv
1221 module procedure mpp_start_update_domain2D_c4_2d
1222 module procedure mpp_start_update_domain2D_c4_3d
1223 module procedure mpp_start_update_domain2D_c4_4d
1224 module procedure mpp_start_update_domain2D_c4_5d
1226 module procedure mpp_start_update_domain2D_i4_2d
1227 module procedure mpp_start_update_domain2D_i4_3d
1228 module procedure mpp_start_update_domain2D_i4_4d
1229 module procedure mpp_start_update_domain2D_i4_5d
1232 !> Must be used after a call to @ref mpp_start_update_domains
1233 !! in order to complete a nonblocking domain update. See @ref mpp_start_update_domains
1235 !> @ingroup mpp_domains_mod
1236 interface mpp_complete_update_domains
1237 module procedure mpp_complete_update_domain2D_r8_2d
1238 module procedure mpp_complete_update_domain2D_r8_3d
1239 module procedure mpp_complete_update_domain2D_r8_4d
1240 module procedure mpp_complete_update_domain2D_r8_5d
1241 module procedure mpp_complete_update_domain2D_r8_2dv
1242 module procedure mpp_complete_update_domain2D_r8_3dv
1243 module procedure mpp_complete_update_domain2D_r8_4dv
1244 module procedure mpp_complete_update_domain2D_r8_5dv
1246 module procedure mpp_complete_update_domain2D_c8_2d
1247 module procedure mpp_complete_update_domain2D_c8_3d
1248 module procedure mpp_complete_update_domain2D_c8_4d
1249 module procedure mpp_complete_update_domain2D_c8_5d
1251 module procedure mpp_complete_update_domain2D_i8_2d
1252 module procedure mpp_complete_update_domain2D_i8_3d
1253 module procedure mpp_complete_update_domain2D_i8_4d
1254 module procedure mpp_complete_update_domain2D_i8_5d
1255 module procedure mpp_complete_update_domain2D_r4_2d
1256 module procedure mpp_complete_update_domain2D_r4_3d
1257 module procedure mpp_complete_update_domain2D_r4_4d
1258 module procedure mpp_complete_update_domain2D_r4_5d
1259 module procedure mpp_complete_update_domain2D_r4_2dv
1260 module procedure mpp_complete_update_domain2D_r4_3dv
1261 module procedure mpp_complete_update_domain2D_r4_4dv
1262 module procedure mpp_complete_update_domain2D_r4_5dv
1264 module procedure mpp_complete_update_domain2D_c4_2d
1265 module procedure mpp_complete_update_domain2D_c4_3d
1266 module procedure mpp_complete_update_domain2D_c4_4d
1267 module procedure mpp_complete_update_domain2D_c4_5d
1269 module procedure mpp_complete_update_domain2D_i4_2d
1270 module procedure mpp_complete_update_domain2D_i4_3d
1271 module procedure mpp_complete_update_domain2D_i4_4d
1272 module procedure mpp_complete_update_domain2D_i4_5d
1275 !> Private interface used for non-blocking updates
1276 !> @ingroup mpp_domains_mod
1277 interface mpp_start_do_update
1278 module procedure mpp_start_do_update_r8_3d
1279 module procedure mpp_start_do_update_r8_3dv
1281 module procedure mpp_start_do_update_c8_3d
1283 module procedure mpp_start_do_update_i8_3d
1284 module procedure mpp_start_do_update_r4_3d
1285 module procedure mpp_start_do_update_r4_3dv
1287 module procedure mpp_start_do_update_c4_3d
1289 module procedure mpp_start_do_update_i4_3d
1292 !> Private interface used for non-blocking updates
1293 !> @ingroup mpp_domains_mod
1294 interface mpp_complete_do_update
1295 module procedure mpp_complete_do_update_r8_3d
1296 module procedure mpp_complete_do_update_r8_3dv
1298 module procedure mpp_complete_do_update_c8_3d
1300 module procedure mpp_complete_do_update_i8_3d
1301 module procedure mpp_complete_do_update_r4_3d
1302 module procedure mpp_complete_do_update_r4_3dv
1304 module procedure mpp_complete_do_update_c4_3d
1306 module procedure mpp_complete_do_update_i4_3d
1309 !> Constructor for the @ref mpp_group_update_type, which is
1310 !! then used with @ref mpp_start_group_update.
1313 !> @ingroup mpp_domains_mod
1314 interface mpp_create_group_update
1315 module procedure mpp_create_group_update_r4_2d
1316 module procedure mpp_create_group_update_r4_3d
1317 module procedure mpp_create_group_update_r4_4d
1318 module procedure mpp_create_group_update_r4_2dv
1319 module procedure mpp_create_group_update_r4_3dv
1320 module procedure mpp_create_group_update_r4_4dv
1321 module procedure mpp_create_group_update_r8_2d
1322 module procedure mpp_create_group_update_r8_3d
1323 module procedure mpp_create_group_update_r8_4d
1324 module procedure mpp_create_group_update_r8_2dv
1325 module procedure mpp_create_group_update_r8_3dv
1326 module procedure mpp_create_group_update_r8_4dv
1327 end interface mpp_create_group_update
1329 !> @ingroup mpp_domains_mod
1330 interface mpp_do_group_update
1331 module procedure mpp_do_group_update_r4
1332 module procedure mpp_do_group_update_r8
1333 end interface mpp_do_group_update
1335 !> Starts a non-blocking group update.
1336 !! Must be followed by a call to @ref mpp_complete_group_update.
1337 !! The @ref mpp_group_update_type argument is created with @ref mpp_create_group_update.
1339 !> @param[inout] type(mpp_group_update_type) group type created for group update
1340 !> @param[inout] type(domain2D) domain to update
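!!
!! A schematic of the full group-update life cycle (a sketch only, with hypothetical
!! field/domain names; d_type is a scalar of the same type/kind as the fields):
!!
!! type(mpp_group_update_type) :: group
!! call mpp_create_group_update(group, u, domain)         ! register fields in the group
!! call mpp_create_group_update(group, v, domain)
!! call mpp_start_group_update(group, domain, d_type)     ! begin the non-blocking exchange
!! ! ... interior computation ...
!! call mpp_complete_group_update(group, domain, d_type)  ! wait for and unpack the halos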
1341 !> @ingroup mpp_domains_mod
1342 interface mpp_start_group_update
1343 module procedure mpp_start_group_update_r4
1344 module procedure mpp_start_group_update_r8
1345 end interface mpp_start_group_update
1347 !> Completes a pending non-blocking group update.
1348 !! Must follow a call to @ref mpp_start_group_update.
1350 !> @param[inout] type(mpp_group_update_type) group
1351 !> @param[inout] type(domain2D) domain
1352 !> @param[in] d_type data type
1353 !> @ingroup mpp_domains_mod
1354 interface mpp_complete_group_update
1355 module procedure mpp_complete_group_update_r4
1356 module procedure mpp_complete_group_update_r8
1357 end interface mpp_complete_group_update
1359 !> @ingroup mpp_domains_mod
1360 interface mpp_reset_group_update_field
1361 module procedure mpp_reset_group_update_field_r4_2d
1362 module procedure mpp_reset_group_update_field_r4_3d
1363 module procedure mpp_reset_group_update_field_r4_4d
1364 module procedure mpp_reset_group_update_field_r4_2dv
1365 module procedure mpp_reset_group_update_field_r4_3dv
1366 module procedure mpp_reset_group_update_field_r4_4dv
1367 module procedure mpp_reset_group_update_field_r8_2d
1368 module procedure mpp_reset_group_update_field_r8_3d
1369 module procedure mpp_reset_group_update_field_r8_4d
1370 module procedure mpp_reset_group_update_field_r8_2dv
1371 module procedure mpp_reset_group_update_field_r8_3dv
1372 module procedure mpp_reset_group_update_field_r8_4dv
1373 end interface mpp_reset_group_update_field
1375 !> Pass data from the coarse grid into halo buffers, ready to be interpolated onto the fine grid.
1377 !! <br>Example usage:
1379 !! call mpp_update_nest_fine(field, nest_domain, wbuffer, ebuffer, sbuffer,
1380 !! nbuffer, nest_level, flags, complete, position, extra_halo, name, tile_count)
1382 !> @ingroup mpp_domains_mod
1383 interface mpp_update_nest_fine
1384 module procedure mpp_update_nest_fine_r8_2d
1385 module procedure mpp_update_nest_fine_r8_3d
1386 module procedure mpp_update_nest_fine_r8_4d
1387 module procedure mpp_update_nest_fine_r8_2dv
1388 module procedure mpp_update_nest_fine_r8_3dv
1389 module procedure mpp_update_nest_fine_r8_4dv
1391 module procedure mpp_update_nest_fine_c8_2d
1392 module procedure mpp_update_nest_fine_c8_3d
1393 module procedure mpp_update_nest_fine_c8_4d
1395 module procedure mpp_update_nest_fine_i8_2d
1396 module procedure mpp_update_nest_fine_i8_3d
1397 module procedure mpp_update_nest_fine_i8_4d
1398 module procedure mpp_update_nest_fine_r4_2d
1399 module procedure mpp_update_nest_fine_r4_3d
1400 module procedure mpp_update_nest_fine_r4_4d
1401 module procedure mpp_update_nest_fine_r4_2dv
1402 module procedure mpp_update_nest_fine_r4_3dv
1403 module procedure mpp_update_nest_fine_r4_4dv
1405 module procedure mpp_update_nest_fine_c4_2d
1406 module procedure mpp_update_nest_fine_c4_3d
1407 module procedure mpp_update_nest_fine_c4_4d
1409 module procedure mpp_update_nest_fine_i4_2d
1410 module procedure mpp_update_nest_fine_i4_3d
1411 module procedure mpp_update_nest_fine_i4_4d
1414 !> @ingroup mpp_domains_mod
1415 interface mpp_do_update_nest_fine
1416 module procedure mpp_do_update_nest_fine_r8_3d
1417 module procedure mpp_do_update_nest_fine_r8_3dv
1419 module procedure mpp_do_update_nest_fine_c8_3d
1421 module procedure mpp_do_update_nest_fine_i8_3d
1422 module procedure mpp_do_update_nest_fine_r4_3d
1423 module procedure mpp_do_update_nest_fine_r4_3dv
1425 module procedure mpp_do_update_nest_fine_c4_3d
1427 module procedure mpp_do_update_nest_fine_i4_3d
1430 !> Pass data from the fine grid into a buffer, ready to be interpolated
1431 !! onto the coarse grid.
1432 !! <br>Example usage:
1434 !! call mpp_update_nest_coarse(field, nest_domain, field_out, nest_level, complete,
1435 !! position, name, tile_count)
1436 !> @ingroup mpp_domains_mod
1437 interface mpp_update_nest_coarse
1438 module procedure mpp_update_nest_coarse_r8_2d
1439 module procedure mpp_update_nest_coarse_r8_3d
1440 module procedure mpp_update_nest_coarse_r8_4d
1441 module procedure mpp_update_nest_coarse_r8_2dv
1442 module procedure mpp_update_nest_coarse_r8_3dv
1443 module procedure mpp_update_nest_coarse_r8_4dv
1445 module procedure mpp_update_nest_coarse_c8_2d
1446 module procedure mpp_update_nest_coarse_c8_3d
1447 module procedure mpp_update_nest_coarse_c8_4d
1449 module procedure mpp_update_nest_coarse_i8_2d
1450 module procedure mpp_update_nest_coarse_i8_3d
1451 module procedure mpp_update_nest_coarse_i8_4d
1452 module procedure mpp_update_nest_coarse_r4_2d
1453 module procedure mpp_update_nest_coarse_r4_3d
1454 module procedure mpp_update_nest_coarse_r4_4d
1455 module procedure mpp_update_nest_coarse_r4_2dv
1456 module procedure mpp_update_nest_coarse_r4_3dv
1457 module procedure mpp_update_nest_coarse_r4_4dv
1459 module procedure mpp_update_nest_coarse_c4_2d
1460 module procedure mpp_update_nest_coarse_c4_3d
1461 module procedure mpp_update_nest_coarse_c4_4d
1463 module procedure mpp_update_nest_coarse_i4_2d
1464 module procedure mpp_update_nest_coarse_i4_3d
1465 module procedure mpp_update_nest_coarse_i4_4d
1468 !> @brief Used by @ref mpp_update_nest_coarse to perform domain updates
1470 !> @ingroup mpp_domains_mod
1471 interface mpp_do_update_nest_coarse
1472 module procedure mpp_do_update_nest_coarse_r8_3d
1473 module procedure mpp_do_update_nest_coarse_r8_3dv
1475 module procedure mpp_do_update_nest_coarse_c8_3d
1477 module procedure mpp_do_update_nest_coarse_i8_3d
1478 module procedure mpp_do_update_nest_coarse_r4_3d
1479 module procedure mpp_do_update_nest_coarse_r4_3dv
1481 module procedure mpp_do_update_nest_coarse_c4_3d
1483 module procedure mpp_do_update_nest_coarse_i4_3d
1486 !> Get the indices of the data passed from the fine grid to the coarse grid.
1487 !! <br>Example usage:
1489 !! call mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse,
1490 !! is_fine, ie_fine, js_fine, je_fine, nest_level, position)
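!!
!! A schematic of the fine-to-coarse workflow (a sketch only, hypothetical names):
!! the F2C indices give the extents needed to size the coarse-side receive array
!! before calling @ref mpp_update_nest_coarse.
!!
!! call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, &
!!                        is_f, ie_f, js_f, je_f, nest_level)
!! allocate( field_out(is_c:ie_c, js_c:je_c, nz) )   ! sized from the coarse-side indices
!! call mpp_update_nest_coarse(field, nest_domain, field_out, nest_level)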
1491 !> @ingroup mpp_domains_mod
1492 interface mpp_get_F2C_index
1493 module procedure mpp_get_F2C_index_fine
1494 module procedure mpp_get_F2C_index_coarse
1497 !> Broadcasts a domain to every PE. Only useful outside the context of its own pelist.
1499 !> <br>Example usage:
1500 !! call mpp_broadcast_domain(domain)
1501 !! call mpp_broadcast_domain(domain_in, domain_out)
1502 !! call mpp_broadcast_domain(domain, tile_coarse) ! nested domains
1504 !> @ingroup mpp_domains_mod
1505 interface mpp_broadcast_domain
1506 module procedure mpp_broadcast_domain_1
1507 module procedure mpp_broadcast_domain_2
1508 module procedure mpp_broadcast_domain_ug
1509 module procedure mpp_broadcast_domain_nest_fine
1510 module procedure mpp_broadcast_domain_nest_coarse
1513 !--------------------------------------------------------------
1514 ! for adjoint update
1515 !--------------------------------------------------------------
1516 !> Similar to @ref mpp_update_domains, but updates the adjoint domains
1517 !> @ingroup mpp_domains_mod
1518 interface mpp_update_domains_ad
1519 module procedure mpp_update_domains_ad_2D_r8_2d
1520 module procedure mpp_update_domains_ad_2D_r8_3d
1521 module procedure mpp_update_domains_ad_2D_r8_4d
1522 module procedure mpp_update_domains_ad_2D_r8_5d
1523 module procedure mpp_update_domains_ad_2D_r8_2dv
1524 module procedure mpp_update_domains_ad_2D_r8_3dv
1525 module procedure mpp_update_domains_ad_2D_r8_4dv
1526 module procedure mpp_update_domains_ad_2D_r8_5dv
1527 module procedure mpp_update_domains_ad_2D_r4_2d
1528 module procedure mpp_update_domains_ad_2D_r4_3d
1529 module procedure mpp_update_domains_ad_2D_r4_4d
1530 module procedure mpp_update_domains_ad_2D_r4_5d
1531 module procedure mpp_update_domains_ad_2D_r4_2dv
1532 module procedure mpp_update_domains_ad_2D_r4_3dv
1533 module procedure mpp_update_domains_ad_2D_r4_4dv
1534 module procedure mpp_update_domains_ad_2D_r4_5dv
1537 !> Private interface used for @ref mpp_update_domains
1538 !> @ingroup mpp_domains_mod
1539 interface mpp_do_update
1540 module procedure mpp_do_update_r8_3d
1541 module procedure mpp_do_update_r8_3dv
1543 module procedure mpp_do_update_c8_3d
1545 module procedure mpp_do_update_i8_3d
1546 module procedure mpp_do_update_r4_3d
1547 module procedure mpp_do_update_r4_3dv
1549 module procedure mpp_do_update_c4_3d
1551 module procedure mpp_do_update_i4_3d
1553 !> Private interface used to update the data domain of a 3D field whose computational domains have been computed
1554 !> @ingroup mpp_domains_mod
1555 interface mpp_do_check
1556 module procedure mpp_do_check_r8_3d
1557 module procedure mpp_do_check_r8_3dv
1559 module procedure mpp_do_check_c8_3d
1561 module procedure mpp_do_check_i8_3d
1562 module procedure mpp_do_check_r4_3d
1563 module procedure mpp_do_check_r4_3dv
1565 module procedure mpp_do_check_c4_3d
1567 module procedure mpp_do_check_i4_3d
1570 !> Passes data from a structured grid to an unstructured grid
1571 !! <br>Example usage:
1573 !! call mpp_pass_SG_to_UG(domain, sg_data, ug_data)
1574 !> @ingroup mpp_domains_mod
1575 interface mpp_pass_SG_to_UG
1576 module procedure mpp_pass_SG_to_UG_r8_2d
1577 module procedure mpp_pass_SG_to_UG_r8_3d
1578 module procedure mpp_pass_SG_to_UG_r4_2d
1579 module procedure mpp_pass_SG_to_UG_r4_3d
1580 module procedure mpp_pass_SG_to_UG_i4_2d
1581 module procedure mpp_pass_SG_to_UG_i4_3d
1582 module procedure mpp_pass_SG_to_UG_l4_2d
1583 module procedure mpp_pass_SG_to_UG_l4_3d
1586 !> Passes a data field from an unstructured grid to a structured grid
1587 !! <br>Example usage:
1589 !! call mpp_pass_UG_to_SG(UG_domain, field_UG, field_SG)
1590 !> @ingroup mpp_domains_mod
1591 interface mpp_pass_UG_to_SG
1592 module procedure mpp_pass_UG_to_SG_r8_2d
1593 module procedure mpp_pass_UG_to_SG_r8_3d
1594 module procedure mpp_pass_UG_to_SG_r4_2d
1595 module procedure mpp_pass_UG_to_SG_r4_3d
1596 module procedure mpp_pass_UG_to_SG_i4_2d
1597 module procedure mpp_pass_UG_to_SG_i4_3d
1598 module procedure mpp_pass_UG_to_SG_l4_2d
1599 module procedure mpp_pass_UG_to_SG_l4_3d
1602 !> Private interface used for @ref mpp_update_domains_ad
1607 !> @ingroup mpp_domains_mod
1608 interface mpp_do_update_ad
1609 module procedure mpp_do_update_ad_r8_3d
1610 module procedure mpp_do_update_ad_r8_3dv
1611 module procedure mpp_do_update_ad_r4_3d
1612 module procedure mpp_do_update_ad_r4_3dv
1615 !> Get the boundary data for a symmetric domain when the data is at the C, E, or N-cell center.<br>
1616 !! @ref mpp_get_boundary is used to get the boundary data for a symmetric domain
1617 !! when the data is at the C, E, or N-cell center. For a cubic grid, the data should always
1618 !! be at the C-cell center.
1619 !! <br>Example usage:
1621 !! call mpp_get_boundary(domain, field, ebuffer, sbuffer, wbuffer, nbuffer)
1622 !! Get boundary information from domain and field and store in buffers
1623 !> @ingroup mpp_domains_mod
1624 interface mpp_get_boundary
1625 module procedure mpp_get_boundary_r8_2d
1626 module procedure mpp_get_boundary_r8_3d
1627 ! module procedure mpp_get_boundary_r8_4d
1628 ! module procedure mpp_get_boundary_r8_5d
1629 module procedure mpp_get_boundary_r8_2dv
1630 module procedure mpp_get_boundary_r8_3dv
1631 ! module procedure mpp_get_boundary_r8_4dv
1632 ! module procedure mpp_get_boundary_r8_5dv
1633 module procedure mpp_get_boundary_r4_2d
1634 module procedure mpp_get_boundary_r4_3d
1635 ! module procedure mpp_get_boundary_r4_4d
1636 ! module procedure mpp_get_boundary_r4_5d
1637 module procedure mpp_get_boundary_r4_2dv
1638 module procedure mpp_get_boundary_r4_3dv
1639 ! module procedure mpp_get_boundary_r4_4dv
1640 ! module procedure mpp_get_boundary_r4_5dv
1643 !> @ingroup mpp_domains_mod
1644 interface mpp_get_boundary_ad
1645 module procedure mpp_get_boundary_ad_r8_2d
1646 module procedure mpp_get_boundary_ad_r8_3d
1647 module procedure mpp_get_boundary_ad_r8_2dv
1648 module procedure mpp_get_boundary_ad_r8_3dv
1649 module procedure mpp_get_boundary_ad_r4_2d
1650 module procedure mpp_get_boundary_ad_r4_3d
1651 module procedure mpp_get_boundary_ad_r4_2dv
1652 module procedure mpp_get_boundary_ad_r4_3dv
1655 !> @ingroup mpp_domains_mod
1656 interface mpp_do_get_boundary
1657 module procedure mpp_do_get_boundary_r8_3d
1658 module procedure mpp_do_get_boundary_r8_3dv
1659 module procedure mpp_do_get_boundary_r4_3d
1660 module procedure mpp_do_get_boundary_r4_3dv
1663 !> @ingroup mpp_domains_mod
1664 interface mpp_do_get_boundary_ad
1665 module procedure mpp_do_get_boundary_ad_r8_3d
1666 module procedure mpp_do_get_boundary_ad_r8_3dv
1667 module procedure mpp_do_get_boundary_ad_r4_3d
1668 module procedure mpp_do_get_boundary_ad_r4_3dv
1671 !> Reorganization of distributed global arrays.<br>
1672 !! \e mpp_redistribute is used to reorganize a distributed array.
1673 !! \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real;
1674 !! of 4-byte or 8-byte kind; of rank up to 5.
1675 !! <br>Example usage:
1676 !! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
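!!
!! A schematic (a sketch only, hypothetical names): move a field between two
!! decompositions of the same global grid, e.g. from a model pelist to an I/O pelist:
!!
!! call mpp_redistribute( domain_model, sst_model, domain_io, sst_io )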
1677 !> @ingroup mpp_domains_mod
1678 interface mpp_redistribute
1679 module procedure mpp_redistribute_r8_2D
1680 module procedure mpp_redistribute_r8_3D
1681 module procedure mpp_redistribute_r8_4D
1682 module procedure mpp_redistribute_r8_5D
1684 module procedure mpp_redistribute_c8_2D
1685 module procedure mpp_redistribute_c8_3D
1686 module procedure mpp_redistribute_c8_4D
1687 module procedure mpp_redistribute_c8_5D
1689 module procedure mpp_redistribute_i8_2D
1690 module procedure mpp_redistribute_i8_3D
1691 module procedure mpp_redistribute_i8_4D
1692 module procedure mpp_redistribute_i8_5D
1693 !!$ module procedure mpp_redistribute_l8_2D
1694 !!$ module procedure mpp_redistribute_l8_3D
1695 !!$ module procedure mpp_redistribute_l8_4D
1696 !!$ module procedure mpp_redistribute_l8_5D
1697 module procedure mpp_redistribute_r4_2D
1698 module procedure mpp_redistribute_r4_3D
1699 module procedure mpp_redistribute_r4_4D
1700 module procedure mpp_redistribute_r4_5D
1702 module procedure mpp_redistribute_c4_2D
1703 module procedure mpp_redistribute_c4_3D
1704 module procedure mpp_redistribute_c4_4D
1705 module procedure mpp_redistribute_c4_5D
1707 module procedure mpp_redistribute_i4_2D
1708 module procedure mpp_redistribute_i4_3D
1709 module procedure mpp_redistribute_i4_4D
1710 module procedure mpp_redistribute_i4_5D
1711 !!$ module procedure mpp_redistribute_l4_2D
1712 !!$ module procedure mpp_redistribute_l4_3D
1713 !!$ module procedure mpp_redistribute_l4_4D
1714 !!$ module procedure mpp_redistribute_l4_5D
1717 !> @ingroup mpp_domains_mod
1718 interface mpp_do_redistribute
1719 module procedure mpp_do_redistribute_r8_3D
1721 module procedure mpp_do_redistribute_c8_3D
1723 module procedure mpp_do_redistribute_i8_3D
1724 module procedure mpp_do_redistribute_l8_3D
1725 module procedure mpp_do_redistribute_r4_3D
1727 module procedure mpp_do_redistribute_c4_3D
1729 module procedure mpp_do_redistribute_i4_3D
1730 module procedure mpp_do_redistribute_l4_3D
1733 !> Parallel checking between two ensembles which run on different sets of PEs at the same time.<br>
1734 !! There are two forms for the <TT>mpp_check_field</TT> call. The 2D
1735 !! version is generally to be used; the 3D version is built by repeated calls to the 2D version.
1737 !! <br>Example usage:
1739 !! call mpp_check_field(field_in, pelist1, pelist2, domain, mesg, &
1740 !! w_halo, s_halo, e_halo, n_halo, force_abort )
1742 !! @param field_in Field to be checked
1743 !! @param domain Domain of current pe
1744 !! @param mesg Message to be printed out
1745 !! @param w_halo Halo size to be checked, default is 0
1746 !! @param s_halo Halo size to be checked, default is 0
1747 !! @param e_halo Halo size to be checked, default is 0
1748 !! @param n_halo Halo size to be checked, default is 0
1749 !! @param force_abort When true, abort program when any difference found. Default is false.
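!!
!! A schematic (a sketch only, hypothetical names): compare a field computed by two
!! ensemble members running concurrently on pelist1 and pelist2, aborting on any difference:
!!
!! call mpp_check_field(sst, pelist1, pelist2, domain, 'SST after physics', force_abort=.true.)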
1750 !> @ingroup mpp_domains_mod
1751 interface mpp_check_field
1752 module procedure mpp_check_field_2D
1753 module procedure mpp_check_field_3D
1756 !***********************************************************************
1758 ! public interface from mpp_domains_reduce.h
1760 !***********************************************************************
1762 !> Fill in a global array from domain-decomposed arrays.<br>
1764 !> <TT>mpp_global_field</TT> is used to get an entire
1765 !! domain-decomposed array on each PE. <TT>MPP_TYPE_</TT> can be of type
1766 !! <TT>complex</TT>, <TT>integer</TT>, <TT>logical</TT> or <TT>real</TT>;
1767 !! of 4-byte or 8-byte kind; of rank up to 5.<br>
1769 !! All PEs in a domain decomposition must call
1770 !! <TT>mpp_global_field</TT>, and each will have a complete global field
1771 !! at the end. Please note that a global array of rank 3 or higher could
1772 !! occupy a lot of memory.
1774 !! @param domain 2D domain
1775 !! @param local Data dimensioned on either the compute or data domains of 'domain'
1776 !! @param[out] global output data dimensioned on the corresponding global domain
1777 !! @param flags Can be set to XONLY or YONLY to request globalization along one axis only
1779 !! <br> Example usage:
1781 !! call mpp_global_field( domain, local, global, flags )
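!!
!! A schematic (a sketch only, hypothetical names): gather a decomposed array into a
!! global array that every PE holds in full, sized from the global-domain extents:
!!
!! call mpp_get_global_domain(domain, isg, ieg, jsg, jeg)
!! allocate( global(isg:ieg, jsg:jeg) )
!! call mpp_global_field( domain, local, global )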
1783 !> @ingroup mpp_domains_mod
1784 interface mpp_global_field
1785 module procedure mpp_global_field2D_r8_2d
1786 module procedure mpp_global_field2D_r8_3d
1787 module procedure mpp_global_field2D_r8_4d
1788 module procedure mpp_global_field2D_r8_5d
1790 module procedure mpp_global_field2D_c8_2d
1791 module procedure mpp_global_field2D_c8_3d
1792 module procedure mpp_global_field2D_c8_4d
1793 module procedure mpp_global_field2D_c8_5d
1795 module procedure mpp_global_field2D_i8_2d
1796 module procedure mpp_global_field2D_i8_3d
1797 module procedure mpp_global_field2D_i8_4d
1798 module procedure mpp_global_field2D_i8_5d
1799 module procedure mpp_global_field2D_l8_2d
1800 module procedure mpp_global_field2D_l8_3d
1801 module procedure mpp_global_field2D_l8_4d
1802 module procedure mpp_global_field2D_l8_5d
1803 module procedure mpp_global_field2D_r4_2d
1804 module procedure mpp_global_field2D_r4_3d
1805 module procedure mpp_global_field2D_r4_4d
1806 module procedure mpp_global_field2D_r4_5d
1808 module procedure mpp_global_field2D_c4_2d
1809 module procedure mpp_global_field2D_c4_3d
1810 module procedure mpp_global_field2D_c4_4d
1811 module procedure mpp_global_field2D_c4_5d
1813 module procedure mpp_global_field2D_i4_2d
1814 module procedure mpp_global_field2D_i4_3d
1815 module procedure mpp_global_field2D_i4_4d
1816 module procedure mpp_global_field2D_i4_5d
1817 module procedure mpp_global_field2D_l4_2d
1818 module procedure mpp_global_field2D_l4_3d
1819 module procedure mpp_global_field2D_l4_4d
1820 module procedure mpp_global_field2D_l4_5d
1823 !> @ingroup mpp_domains_mod
1824 interface mpp_global_field_ad
1825 module procedure mpp_global_field2D_r8_2d_ad
1826 module procedure mpp_global_field2D_r8_3d_ad
1827 module procedure mpp_global_field2D_r8_4d_ad
1828 module procedure mpp_global_field2D_r8_5d_ad
1830 module procedure mpp_global_field2D_c8_2d_ad
1831 module procedure mpp_global_field2D_c8_3d_ad
1832 module procedure mpp_global_field2D_c8_4d_ad
1833 module procedure mpp_global_field2D_c8_5d_ad
1835 module procedure mpp_global_field2D_i8_2d_ad
1836 module procedure mpp_global_field2D_i8_3d_ad
1837 module procedure mpp_global_field2D_i8_4d_ad
1838 module procedure mpp_global_field2D_i8_5d_ad
1839 module procedure mpp_global_field2D_l8_2d_ad
1840 module procedure mpp_global_field2D_l8_3d_ad
1841 module procedure mpp_global_field2D_l8_4d_ad
1842 module procedure mpp_global_field2D_l8_5d_ad
1843 module procedure mpp_global_field2D_r4_2d_ad
1844 module procedure mpp_global_field2D_r4_3d_ad
1845 module procedure mpp_global_field2D_r4_4d_ad
1846 module procedure mpp_global_field2D_r4_5d_ad
1848 module procedure mpp_global_field2D_c4_2d_ad
1849 module procedure mpp_global_field2D_c4_3d_ad
1850 module procedure mpp_global_field2D_c4_4d_ad
1851 module procedure mpp_global_field2D_c4_5d_ad
1853 module procedure mpp_global_field2D_i4_2d_ad
1854 module procedure mpp_global_field2D_i4_3d_ad
1855 module procedure mpp_global_field2D_i4_4d_ad
1856 module procedure mpp_global_field2D_i4_5d_ad
1857 module procedure mpp_global_field2D_l4_2d_ad
1858 module procedure mpp_global_field2D_l4_3d_ad
1859 module procedure mpp_global_field2D_l4_4d_ad
1860 module procedure mpp_global_field2D_l4_5d_ad
1863 !> Private helper interface used by @ref mpp_global_field
1864 !> @ingroup mpp_domains_mod
1865 interface mpp_do_global_field
1866 module procedure mpp_do_global_field2D_r8_3d
1868 module procedure mpp_do_global_field2D_c8_3d
1870 module procedure mpp_do_global_field2D_i8_3d
1871 module procedure mpp_do_global_field2D_l8_3d
1872 module procedure mpp_do_global_field2D_r4_3d
1874 module procedure mpp_do_global_field2D_c4_3d
1876 module procedure mpp_do_global_field2D_i4_3d
1877 module procedure mpp_do_global_field2D_l4_3d
1880 interface mpp_do_global_field_a2a
1881 module procedure mpp_do_global_field2D_a2a_r8_3d
1883 module procedure mpp_do_global_field2D_a2a_c8_3d
1885 module procedure mpp_do_global_field2D_a2a_i8_3d
1886 module procedure mpp_do_global_field2D_a2a_l8_3d
1887 module procedure mpp_do_global_field2D_a2a_r4_3d
1889 module procedure mpp_do_global_field2D_a2a_c4_3d
1891 module procedure mpp_do_global_field2D_a2a_i4_3d
1892 module procedure mpp_do_global_field2D_a2a_l4_3d
1895 !> Same functionality as @ref mpp_global_field but for unstructured domains
1896 !> @ingroup mpp_domains_mod
1897 interface mpp_global_field_ug
1898 module procedure mpp_global_field2D_ug_r8_2d
1899 module procedure mpp_global_field2D_ug_r8_3d
1900 module procedure mpp_global_field2D_ug_r8_4d
1901 module procedure mpp_global_field2D_ug_r8_5d
1902 module procedure mpp_global_field2D_ug_i8_2d
1903 module procedure mpp_global_field2D_ug_i8_3d
1904 module procedure mpp_global_field2D_ug_i8_4d
1905 module procedure mpp_global_field2D_ug_i8_5d
1906 module procedure mpp_global_field2D_ug_r4_2d
1907 module procedure mpp_global_field2D_ug_r4_3d
1908 module procedure mpp_global_field2D_ug_r4_4d
1909 module procedure mpp_global_field2D_ug_r4_5d
1910 module procedure mpp_global_field2D_ug_i4_2d
1911 module procedure mpp_global_field2D_ug_i4_3d
1912 module procedure mpp_global_field2D_ug_i4_4d
1913 module procedure mpp_global_field2D_ug_i4_5d
1916 !> @ingroup mpp_domains_mod
1917 interface mpp_do_global_field_ad
1918 module procedure mpp_do_global_field2D_r8_3d_ad
1920 module procedure mpp_do_global_field2D_c8_3d_ad
1922 module procedure mpp_do_global_field2D_i8_3d_ad
1923 module procedure mpp_do_global_field2D_l8_3d_ad
1924 module procedure mpp_do_global_field2D_r4_3d_ad
1926 module procedure mpp_do_global_field2D_c4_3d_ad
1928 module procedure mpp_do_global_field2D_i4_3d_ad
1929 module procedure mpp_do_global_field2D_l4_3d_ad
1932 !> Global max of domain-decomposed arrays.<br>
1933 !! \e mpp_global_max is used to get the maximum value of a
1934 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1935 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1936 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1938 !! All PEs in a domain decomposition must call \e mpp_global_max,
1939 !! and each will have the result upon exit.
1940 !! The function \e mpp_global_min, with an identical syntax, is also available.
1942 !! @param domain 2D domain
1943 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1944 !! @param locus If present, can be used to retrieve the location of the maximum
1946 !! <br>Example usage:
1947 !! mpp_global_max( domain, field, locus )
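!!
!! A schematic (a sketch only, hypothetical names): maximum of a 2D field and the
!! global indices at which it occurs:
!!
!! integer :: locus(2)                     ! one entry per rank of 'field'
!! tmax = mpp_global_max( domain, field, locus )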
1948 !> @ingroup mpp_domains_mod
1949 interface mpp_global_max
1950 module procedure mpp_global_max_r8_2d
1951 module procedure mpp_global_max_r8_3d
1952 module procedure mpp_global_max_r8_4d
1953 module procedure mpp_global_max_r8_5d
1954 module procedure mpp_global_max_r4_2d
1955 module procedure mpp_global_max_r4_3d
1956 module procedure mpp_global_max_r4_4d
1957 module procedure mpp_global_max_r4_5d
1958 module procedure mpp_global_max_i8_2d
1959 module procedure mpp_global_max_i8_3d
1960 module procedure mpp_global_max_i8_4d
1961 module procedure mpp_global_max_i8_5d
1962 module procedure mpp_global_max_i4_2d
1963 module procedure mpp_global_max_i4_3d
1964 module procedure mpp_global_max_i4_4d
1965 module procedure mpp_global_max_i4_5d
1968 !> Global min of domain-decomposed arrays.<br>
1969 !! \e mpp_global_min is used to get the minimum value of a
1970 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1971 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1972 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1974 !! All PEs in a domain decomposition must call \e mpp_global_min,
1975 !! and each will have the result upon exit.
1976 !! The function \e mpp_global_max, with an identical syntax, is also available.
1978 !! @param domain 2D domain
1979 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1980 !! @param locus If present, can be used to retrieve the location of the minimum
1982 !! <br>Example usage:
1983 !! mpp_global_min( domain, field, locus )
1984 !> @ingroup mpp_domains_mod
1985 interface mpp_global_min
1986 module procedure mpp_global_min_r8_2d
1987 module procedure mpp_global_min_r8_3d
1988 module procedure mpp_global_min_r8_4d
1989 module procedure mpp_global_min_r8_5d
1990 module procedure mpp_global_min_r4_2d
1991 module procedure mpp_global_min_r4_3d
1992 module procedure mpp_global_min_r4_4d
1993 module procedure mpp_global_min_r4_5d
1994 module procedure mpp_global_min_i8_2d
1995 module procedure mpp_global_min_i8_3d
1996 module procedure mpp_global_min_i8_4d
1997 module procedure mpp_global_min_i8_5d
1998 module procedure mpp_global_min_i4_2d
1999 module procedure mpp_global_min_i4_3d
2000 module procedure mpp_global_min_i4_4d
2001 module procedure mpp_global_min_i4_5d
2004 !> Global sum of domain-decomposed arrays.<br>
2005 !! \e mpp_global_sum is used to get the sum of a domain-decomposed array
2006 !! on each PE. \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real; of 4-byte or
2007 !! 8-byte kind; of rank up to 5.
2009 !! @param domain 2D domain
2010 !! @param field field data dimensioned on either the compute or data domain of 'domain'
2011 !! @param flags If present, must have the value BITWISE_EXACT_SUM. This produces a sum that
2012 !! is guaranteed to be identical irrespective of how the domain is decomposed.
2013 !! This method does the sum first along the ranks beyond 2, and then calls mpp_global_field
2014 !! to produce a global 2D array which is then summed. The default method, which is
2015 !! considerably faster, does a local sum followed by mpp_sum across the domain decomposition.
2018 !! <br>Example usage:
2019 !! mpp_global_sum( domain, field, flags )
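!!
!! A schematic (a sketch only, hypothetical names): a global sum that is reproducible
!! across different domain decompositions:
!!
!! total = mpp_global_sum( domain, field, flags=BITWISE_EXACT_SUM )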
2020 !! @note All PEs in a domain decomposition must call \e mpp_global_sum,
2021 !! and each will have the result upon exit.
2022 !> @ingroup mpp_domains_mod
2023 interface mpp_global_sum
2024 module procedure mpp_global_sum_r8_2d
2025 module procedure mpp_global_sum_r8_3d
2026 module procedure mpp_global_sum_r8_4d
2027 module procedure mpp_global_sum_r8_5d
2029 module procedure mpp_global_sum_c8_2d
2030 module procedure mpp_global_sum_c8_3d
2031 module procedure mpp_global_sum_c8_4d
2032 module procedure mpp_global_sum_c8_5d
2034 module procedure mpp_global_sum_r4_2d
2035 module procedure mpp_global_sum_r4_3d
2036 module procedure mpp_global_sum_r4_4d
2037 module procedure mpp_global_sum_r4_5d
2039 module procedure mpp_global_sum_c4_2d
2040 module procedure mpp_global_sum_c4_3d
2041 module procedure mpp_global_sum_c4_4d
2042 module procedure mpp_global_sum_c4_5d
2044 module procedure mpp_global_sum_i8_2d
2045 module procedure mpp_global_sum_i8_3d
2046 module procedure mpp_global_sum_i8_4d
2047 module procedure mpp_global_sum_i8_5d
2048 module procedure mpp_global_sum_i4_2d
2049 module procedure mpp_global_sum_i4_3d
2050 module procedure mpp_global_sum_i4_4d
2051 module procedure mpp_global_sum_i4_5d
2055 !> @ingroup mpp_domains_mod
2056 interface mpp_global_sum_tl
2057 module procedure mpp_global_sum_tl_r8_2d
2058 module procedure mpp_global_sum_tl_r8_3d
2059 module procedure mpp_global_sum_tl_r8_4d
2060 module procedure mpp_global_sum_tl_r8_5d
2062 module procedure mpp_global_sum_tl_c8_2d
2063 module procedure mpp_global_sum_tl_c8_3d
2064 module procedure mpp_global_sum_tl_c8_4d
2065 module procedure mpp_global_sum_tl_c8_5d
2067 module procedure mpp_global_sum_tl_r4_2d
2068 module procedure mpp_global_sum_tl_r4_3d
2069 module procedure mpp_global_sum_tl_r4_4d
2070 module procedure mpp_global_sum_tl_r4_5d
2072 module procedure mpp_global_sum_tl_c4_2d
2073 module procedure mpp_global_sum_tl_c4_3d
2074 module procedure mpp_global_sum_tl_c4_4d
2075 module procedure mpp_global_sum_tl_c4_5d
2077 module procedure mpp_global_sum_tl_i8_2d
2078 module procedure mpp_global_sum_tl_i8_3d
2079 module procedure mpp_global_sum_tl_i8_4d
2080 module procedure mpp_global_sum_tl_i8_5d
2081 module procedure mpp_global_sum_tl_i4_2d
2082 module procedure mpp_global_sum_tl_i4_3d
2083 module procedure mpp_global_sum_tl_i4_4d
2084 module procedure mpp_global_sum_tl_i4_5d
2089 !> @ingroup mpp_domains_mod
2090 interface mpp_global_sum_ad
2091 module procedure mpp_global_sum_ad_r8_2d
2092 module procedure mpp_global_sum_ad_r8_3d
2093 module procedure mpp_global_sum_ad_r8_4d
2094 module procedure mpp_global_sum_ad_r8_5d
2096 module procedure mpp_global_sum_ad_c8_2d
2097 module procedure mpp_global_sum_ad_c8_3d
2098 module procedure mpp_global_sum_ad_c8_4d
2099 module procedure mpp_global_sum_ad_c8_5d
2101 module procedure mpp_global_sum_ad_r4_2d
2102 module procedure mpp_global_sum_ad_r4_3d
2103 module procedure mpp_global_sum_ad_r4_4d
2104 module procedure mpp_global_sum_ad_r4_5d
2106 module procedure mpp_global_sum_ad_c4_2d
2107 module procedure mpp_global_sum_ad_c4_3d
2108 module procedure mpp_global_sum_ad_c4_4d
2109 module procedure mpp_global_sum_ad_c4_5d
2111 module procedure mpp_global_sum_ad_i8_2d
2112 module procedure mpp_global_sum_ad_i8_3d
2113 module procedure mpp_global_sum_ad_i8_4d
2114 module procedure mpp_global_sum_ad_i8_5d
2115 module procedure mpp_global_sum_ad_i4_2d
2116 module procedure mpp_global_sum_ad_i4_3d
2117 module procedure mpp_global_sum_ad_i4_4d
2118 module procedure mpp_global_sum_ad_i4_5d
2122 !***********************************************************************
2124 ! public interface from mpp_domain_util.h
2126 !***********************************************************************
2127 !> @brief Retrieve PE number of a neighboring domain.
2129 !> Given a 1-D or 2-D domain decomposition, this call allows users to retrieve
2130 !! the PE number of an adjacent PE-domain while taking into account that the
2131 !! domain may have holes (masked) and/or have cyclic boundary conditions and/or a
2132 !! folded edge. Which PE-domain is retrieved depends on "direction":
2133 !! +1 (right) or -1 (left) for a 1-D domain decomposition and either NORTH, SOUTH,
2134 !! EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST, or NORTH_WEST for a 2-D
2135 !! decomposition. If no neighboring domain exists (masked domain), then the
2136 !! returned "pe" value will be set to NULL_PE.<br>
2137 !! <br>Example usage:
2139 !! call mpp_get_neighbor_pe( domain1d, direction=+1 , pe)
2141 !! Set pe to the neighbor pe number that is to the right of the current pe
2143 !! call mpp_get_neighbor_pe( domain2d, direction=NORTH, pe)
2145 !! Get neighbor pe number that's above/north of the current pe
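!!
!! A schematic (a sketch only, hypothetical names): guard communication using the
!! NULL_PE value returned for a masked or non-existent neighbour:
!!
!! call mpp_get_neighbor_pe( domain2d, NORTH, pe_north )
!! if( pe_north .NE. NULL_PE ) then
!!    ! ... exchange data with pe_north ...
!! endif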
2146 !> @ingroup mpp_domains_mod
2147 interface mpp_get_neighbor_pe
2148 module procedure mpp_get_neighbor_pe_1d
2149 module procedure mpp_get_neighbor_pe_2d
2152 !> Equality/inequality operators for domaintypes. <br>
2154 !! <br>The module provides public operators to check for
2155 !! equality/inequality of domaintypes, e.g:<br>
2157 !! type(domain1D) :: a, b
2158 !! type(domain2D) :: c, d
2167 !! Domains are considered equal if and only if the start and end
2168 !! indices of each of their component global, data and compute domains are equal.
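!!
!! A schematic (a sketch only, hypothetical names):
!!
!! if( domain_atm .NE. domain_ice ) call mpp_error(FATAL, 'atm and ice domains differ')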
2169 !> @ingroup mpp_domains_mod
2170 interface operator(.EQ.)
2171 module procedure mpp_domain1D_eq
2172 module procedure mpp_domain2D_eq
2173 module procedure mpp_domainUG_eq
2176 !> @ingroup mpp_domains_mod
2177 interface operator(.NE.)
2178 module procedure mpp_domain1D_ne
2179 module procedure mpp_domain2D_ne
2180 module procedure mpp_domainUG_ne
2183 !> These routines retrieve the axis specifications associated with the compute domains.
2184 !! The domain is a derived type with private elements.
2186 !! The 2D version of these is a simple extension of 1D.
2187 !! <br>Example usage:
2189 !! call mpp_get_compute_domain(domain_1D, is, ie)
2190 !! call mpp_get_compute_domain(domain_2D, is, ie, js, je)
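!!
!! A schematic of the usual allocate-and-loop pattern (a sketch only, hypothetical
!! names): allocate on the data domain, compute on the compute domain:
!!
!! call mpp_get_compute_domain(domain, is, ie, js, je)
!! call mpp_get_data_domain(domain, isd, ied, jsd, jed)
!! allocate( t(isd:ied, jsd:jed) )       ! data domain includes the halo points
!! do j = js, je                         ! compute-domain (interior) points only
!!    do i = is, ie
!!       t(i,j) = 0.0
!!    enddo
!! enddo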
2191 !> @ingroup mpp_domains_mod
2192 interface mpp_get_compute_domain
2193 module procedure mpp_get_compute_domain1D
2194 module procedure mpp_get_compute_domain2D
2197 !> Retrieve the entire array of compute domain extents associated with a decomposition.
2199 !> @param domain 2D domain
2200 !> @param[out] xbegin,xend,ybegin,yend x and y domain start and end indices
2201 !> @param[out] xsize,ysize x and y domain sizes
2202 !! <br>Example usage:
2204 !! call mpp_get_compute_domains( domain, xbegin, xend, xsize, &
2205 !! ybegin, yend, ysize )
2206 !> @ingroup mpp_domains_mod
2207 interface mpp_get_compute_domains
2208 module procedure mpp_get_compute_domains1D
2209 module procedure mpp_get_compute_domains2D
2212 !> @ingroup mpp_domains_mod
2213 interface mpp_get_global_domains
2214 module procedure mpp_get_global_domains1D
2215 module procedure mpp_get_global_domains2D
2218 !> These routines retrieve the axis specifications associated with the data domains.
2219 !! The domain is a derived type with private elements.
2221 !! The 2D version of these is a simple extension of 1D.
2222 !! <br>Example usage:
2224 !! call mpp_get_data_domain(domain_1d, isd, ied)
2225 !! call mpp_get_data_domain(domain_2d, isd, ied, jsd, jed)
2226 !> @ingroup mpp_domains_mod
2227 interface mpp_get_data_domain
2228 module procedure mpp_get_data_domain1D
2229 module procedure mpp_get_data_domain2D
2232 !> These routines retrieve the axis specifications associated with the global domains.
2233 !! The domain is a derived type with private elements.
2235 !! The 2D version of these is a simple extension of 1D.
2236 !! <br>Example usage:
2238 !! call mpp_get_global_domain(domain_1d, isg, ieg)
2239 !! call mpp_get_global_domain(domain_2d, isg, ieg, jsg, jeg)
2240 !> @ingroup mpp_domains_mod
2241 interface mpp_get_global_domain
2242 module procedure mpp_get_global_domain1D
2243 module procedure mpp_get_global_domain2D
2246 !> These routines retrieve the axis specifications associated with the memory domains.
2247 !! The domain is a derived type with private elements.
2249 !! The 2D version of these is a simple extension of 1D.
2250 !! <br>Example usage:
2252 !! call mpp_get_memory_domain(domain_1d, ism, iem)
2253 !! call mpp_get_memory_domain(domain_2d, ism, iem, jsm, jem)
2254 !> @ingroup mpp_domains_mod
2255 interface mpp_get_memory_domain
2256 module procedure mpp_get_memory_domain1D
2257 module procedure mpp_get_memory_domain2D
2260 !> @ingroup mpp_domains_mod
2261 interface mpp_get_domain_extents
2262 module procedure mpp_get_domain_extents1D
2263 module procedure mpp_get_domain_extents2D
2266 !> These routines set the axis specifications associated with the compute domains.
2267 !! The domain is a derived type with private elements.
2269 !! The 2D version of these is a simple extension of 1D.
2270 !! <br>Example usage:
2272 !! call mpp_set_compute_domain(domain_1d, is, ie)
2273 !! call mpp_set_compute_domain(domain_2d, is, ie, js, je)
2274 !> @ingroup mpp_domains_mod
2275 interface mpp_set_compute_domain
2276 module procedure mpp_set_compute_domain1D
2277 module procedure mpp_set_compute_domain2D
2280 !> These routines set the axis specifications associated with the data domains.
2281 !! The domain is a derived type with private elements.
2283 !! The 2D version of these is a simple extension of 1D.
2284 !! <br>Example usage:
2286 !! call mpp_set_data_domain(domain_1d, isd, ied)
2287 !! call mpp_set_data_domain(domain_2d, isd, ied, jsd, jed)
2288 !> @ingroup mpp_domains_mod
2289 interface mpp_set_data_domain
2290 module procedure mpp_set_data_domain1D
2291 module procedure mpp_set_data_domain2D
2294 !> These routines set the axis specifications associated with the global domains.
2295 !! The domain is a derived type with private elements.
2297 !! The 2D version of these is a simple extension of 1D.
2298 !! <br>Example usage:
2300 !! call mpp_set_global_domain(domain_1d, isg, ieg)
2301 !! call mpp_set_global_domain(domain_2d, isg, ieg, jsg, jeg)
2302 !> @ingroup mpp_domains_mod
2303 interface mpp_set_global_domain
2304 module procedure mpp_set_global_domain1D
2305 module procedure mpp_set_global_domain2D
2308 !> Retrieve list of PEs associated with a domain decomposition.
2309 !! The 1D version of this call returns an array of the PEs assigned to
2310 !! this 1D domain decomposition. In addition the optional argument pos may be
2311 !! used to retrieve the 0-based position of the domain local to the
2312 !! calling PE, i.e., <TT> domain\%list(pos)\%pe</TT> is the local PE,
2313 !! as returned by @ref mpp_pe().
2314 !! The 2D version of this call is identical to 1D version.
2315 !> @ingroup mpp_domains_mod
2316 interface mpp_get_pelist
2317 module procedure mpp_get_pelist1D
2318 module procedure mpp_get_pelist2D
2321 !> Retrieve the layout associated with a domain decomposition.
2322 !! The 1D version of this call returns the number of divisions that was assigned to this
2323 !! decomposition axis. The 2D version of this call returns an array of dimension 2 holding the
2324 !! results on two axes.
2325 !! <br>Example usage:
2327 !! call mpp_get_layout( domain, layout )
2328 !> @ingroup mpp_domains_mod
2329 interface mpp_get_layout
2330 module procedure mpp_get_layout1D
2331 module procedure mpp_get_layout2D
2333 !> Private interface for internal usage, compares two sizes
2334 !> @ingroup mpp_domains_mod
2335 interface check_data_size
2336 module procedure check_data_size_1d
2337 module procedure check_data_size_2d
2340 !> Nullify domain list. This interface is needed in mpp_domains_test.
2341 !! 1-D case can be added in if needed.
2342 !! <br>Example usage:
2344 !! call mpp_nullify_domain_list(domain)
2345 !> @ingroup mpp_domains_mod
2346 interface mpp_nullify_domain_list
2347 module procedure nullify_domain2d_list
2350 ! Include variable "version" to be written to log file.
2351 #include<file_version.h>
2357 #include <mpp_define_nest_domains.inc>
2358 #include <mpp_domains_util.inc>
2359 #include <mpp_domains_comm.inc>
2360 #include <mpp_domains_define.inc>
2361 #include <mpp_domains_misc.inc>
2362 #include <mpp_domains_reduce.inc>
2363 #include <mpp_unstruct_domain.inc>
2365 end module mpp_domains_mod