4 !#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
5 #define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
7 ! Workaround for bug in the IBM MPI implementation. Look near the
8 ! bottom of this file for an explanation.
9 #ifdef IBM_REDUCE_BUG_WORKAROUND
10 #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) reduce_add_integer(sb,rb,c,r,com)
12 #define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) MPI_Reduce(sb,rb,c,dt,op,r,com,ierr)
13 !#define mpi_x_reduce(sb,rb,c,dt,op,r,com,ierr) MPI_Reduce(sb,rb,c,dt,op,r,com,ierr) ; write(0,*)__LINE__
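! Illustrative sketch (not part of the build) of what the macro above does:
! with the workaround enabled, a call written as
!     CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER, MPI_SUM, root, comm, ierr )
! is preprocessed into
!     CALL reduce_add_integer( sz, bigbufsize, 1, root, comm )
! whereas the default build expands it to a plain MPI_Reduce with the same
! argument list.  The names sz, bigbufsize, root and comm are placeholders here.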
16 MODULE module_wrf_quilt
19 ! This module contains WRF-specific I/O quilt routines called by both
20 ! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
21 ! a run-time optimization that allows I/O operations, executed on the I/O
22 ! quilt server tasks, to be overlapped with useful computation, executed on
23 ! the compute tasks. Since I/O operations are often quite slow compared to
24 ! computation, this performance optimization can increase parallel
27 ! Currently, one group of I/O servers can be specified at run-time. Namelist
28 ! variable "nio_tasks_per_group" is used to specify the number of I/O server
29 ! tasks in this group. In most cases, parallel efficiency is optimized when
30 ! the minimum number of I/O server tasks is used. If the memory needed to cache
31 ! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
32 ! If not, increase the number of I/O server tasks until I/O operations fit in
33 ! memory. Multiple groups of I/O server tasks are also supported; the
34 ! number of groups is specified by namelist variable "nio_groups"
35 ! (see the example fragment below). Currently, I/O servers
36 ! only support overlap of output operations with computation. Also, only I/O
37 ! packages that do not support native parallel I/O may be used with I/O server
38 ! tasks. This excludes PHDF5 and MCEL.
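! As an illustration (not authoritative; see the WRF namelist documentation
! for the defaults), a run requesting one group of two I/O server tasks would
! contain a namelist fragment along these lines:
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!   /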
40 ! In this module, the I/O quilt server tasks call package-dependent
41 ! WRF-specific I/O interfaces to perform I/O operations requested by the
42 ! client (compute) tasks. All of these calls occur inside subroutine
45 ! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
46 ! interfaces that send requests to the I/O quilt servers. All of these calls
47 ! are made from module_io.F.
49 ! These routines have the same names and (roughly) the same arguments as those
50 ! specified in the WRF I/O API except that:
51 ! - "Quilt I/O" routines defined in this file and called by routines in
52 ! module_io.F have the "wrf_quilt_" prefix.
53 ! - Package-dependent routines called from routines in this file are defined
54 ! in the external I/O packages and have the "ext_" prefix.
56 ! Both client (compute) and server tasks call routine init_module_wrf_quilt()
57 ! which then calls setup_quilt_servers() to determine which tasks are compute
58 ! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
59 ! server tasks call routine quilt() and remain there for the rest of the model
60 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
63 ! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
64 ! version of the WRF I/O API. This document includes detailed descriptions
65 ! of subroutines and their arguments that are not duplicated here.
68 USE module_internal_header_util
71 USE module_cpl, ONLY : coupler_on, cpl_set_dm_communicator, cpl_finalize
74 INTEGER, PARAMETER :: int_num_handles = 99
75 INTEGER, PARAMETER :: max_servers = int_num_handles+1 ! why +1?
76 LOGICAL, DIMENSION(0:int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
77 INTEGER, DIMENSION(0:int_num_handles) :: int_num_bytes_to_write, io_form
78 REAL, POINTER,SAVE :: int_local_output_buffer(:)
79 INTEGER, SAVE :: int_local_output_cursor
80 LOGICAL :: quilting_enabled
81 LOGICAL :: disable_quilt = .FALSE.
82 INTEGER :: prev_server_for_handle = -1
83 INTEGER :: server_for_handle(int_num_handles)
84 INTEGER :: reduced(2), reduced_dummy(2)
85 LOGICAL, EXTERNAL :: wrf_dm_on_monitor
87 INTEGER :: mpi_comm_avail,availrank
88 LOGICAL :: in_avail=.false., poll_servers=.false.
92 INTEGER :: mpi_comm_local
93 LOGICAL :: compute_node
94 LOGICAL :: compute_group_master(max_servers)
95 INTEGER :: mpi_comm_io_groups(max_servers)
96 INTEGER :: nio_tasks_in_group
97 INTEGER :: nio_tasks_per_group
98 INTEGER :: ncompute_tasks
102 INTEGER, PARAMETER :: onebyte = 1
103 INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
104 INTEGER, DIMENSION(4096) :: hdrbuf
105 INTEGER, DIMENSION(int_num_handles) :: handle
108 #ifdef IBM_REDUCE_BUG_WORKAROUND
109 ! Workaround for bug in the IBM MPI implementation. Look near the
110 ! bottom of this file for an explanation.
111 interface reduce_add_integer
112 module procedure reduce_add_int_arr
113 module procedure reduce_add_int_scl
119 #if defined(DM_PARALLEL) && !defined( STUBMPI )
120 INTEGER FUNCTION get_server_id ( dhandle )
122 ! Logic on the client side to decide which I/O server
123 ! group to send to. If the unit corresponds to a file that's
124 ! already been opened, then we have no choice but to send the
125 ! data to that group again, regardless of whether there are
126 ! other server-groups. If it's a new file, we can choose a new
127 ! server group. I.e. opening a file locks it onto a server
128 ! group. Closing the file unlocks it.
131 INTEGER, INTENT(IN) :: dhandle
132 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
133 IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
134 get_server_id = server_for_handle ( dhandle )
136 IF(poll_servers) THEN
137 ! Poll server group masters to find an inactive I/O server group:
138 call wrf_quilt_find_server(server_for_handle(dhandle))
140 ! Server polling is disabled, so cycle through servers:
141 prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
142 server_for_handle( dhandle ) = prev_server_for_handle+1
144 get_server_id=server_for_handle(dhandle)
147 CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
149 END FUNCTION get_server_id
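! A minimal sketch (hypothetical helper, not called anywhere) of the
! round-robin choice made in get_server_id above when server polling is
! disabled: I/O server groups are numbered 1..ngroups and each newly opened
! file simply takes the group after the one used for the previous new handle.
INTEGER FUNCTION example_next_io_group( prev_group, ngroups )
  IMPLICIT NONE
  INTEGER, INTENT(IN) :: prev_group   ! group used for the previous new handle (1..ngroups)
  INTEGER, INTENT(IN) :: ngroups      ! number of I/O server groups (nio_groups)
  ! wrap around after the last group
  example_next_io_group = MOD( prev_group, ngroups ) + 1
END FUNCTION example_next_io_group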
152 SUBROUTINE set_server_id ( dhandle, value )
154 INTEGER, INTENT(IN) :: dhandle, value
155 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
156 server_for_handle(dhandle) = value
158 CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
160 END SUBROUTINE set_server_id
162 LOGICAL FUNCTION get_poll_servers()
164 get_poll_servers=poll_servers
165 end FUNCTION get_poll_servers
167 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
168 SUBROUTINE int_get_fresh_handle( retval )
170 ! Find an unused "client file handle" and return it in retval.
171 ! The "client file handle" is used to remember how a file was opened
172 ! so clients do not need to ask the I/O quilt servers for this information.
173 ! It is also used as a file identifier in communications with the I/O
176 ! Note that client tasks know nothing about package-specific handles.
177 ! Only the I/O quilt servers know about them.
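! For example (illustrative): if handles 1 and 2 are already marked in use,
! the next open request (e.g. via a wrf_quilt_open_for_write_begin call)
! receives retval = 3, and 3 then serves as the DataHandle in subsequent
! quilted I/O calls for that file until the file is closed.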
181 DO i = 1, int_num_handles
182 IF ( .NOT. int_handle_in_use(i) ) THEN
188 IF ( retval < 0 ) THEN
189 CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not")
191 int_handle_in_use(i) = .TRUE.
192 NULLIFY ( int_local_output_buffer )
193 END SUBROUTINE int_get_fresh_handle
195 SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
206 ! Both client (compute) and server tasks call this routine to
207 ! determine which tasks are compute tasks and which are I/O server tasks.
209 ! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
210 ! contain MPI communicators as follows:
212 ! MPI_COMM_LOCAL is the Communicator for the local groups of tasks. For the
213 ! compute tasks it is the group of compute tasks; for a server group it is the
214 ! communicator of tasks in the server group.
216 ! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
217 ! more compute tasks and a single I/O server assigned to those compute tasks.
218 ! The I/O server task is always the last task in these communicators.
219 ! On a compute task, which has a single associated server task in each of the server
220 ! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
221 ! to a different server group.
222 ! On a server task only the first element of MPI_COMM_IO_GROUPS is used
223 ! because each server task is part of only one io_group.
225 ! I/O server tasks in each I/O server group are divided among compute tasks as
226 ! evenly as possible.
228 ! When multiple I/O server groups are used, each must have the same number of
229 ! tasks. When the total number of extra I/O tasks does not divide evenly by
230 ! the number of io server groups requested, the remainder tasks are not used
233 ! For example, communicator membership for 18 tasks with nio_groups=2 and
234 ! nio_tasks_per_group=3 is shown below:
237 ! Membership for MPI_COMM_LOCAL communicators:
238 ! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
239 ! 1ST I/O SERVER GROUP: 12 13 14
240 ! 2ND I/O SERVER GROUP: 15 16 17
242 ! Membership for MPI_COMM_IO_GROUPS(1):
243 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
244 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
245 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
246 ! I/O SERVER TASK 12: 0 3 6 9 12
247 ! I/O SERVER TASK 13: 1 4 7 10 13
248 ! I/O SERVER TASK 14: 2 5 8 11 14
249 ! I/O SERVER TASK 15: 0 3 6 9 15
250 ! I/O SERVER TASK 16: 1 4 7 10 16
251 ! I/O SERVER TASK 17: 2 5 8 11 17
253 ! Membership for MPI_COMM_IO_GROUPS(2):
254 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
255 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
256 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
257 ! I/O SERVER TASK 12: ** not used **
258 ! I/O SERVER TASK 13: ** not used **
259 ! I/O SERVER TASK 14: ** not used **
260 ! I/O SERVER TASK 15: ** not used **
261 ! I/O SERVER TASK 16: ** not used **
262 ! I/O SERVER TASK 17: ** not used **
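! As a worked illustration of the MPI_Comm_split calls below for the 18-task
! case above (12 compute tasks, nio_groups=2, nio_tasks_per_group=3), the
! coloring used to build MPI_COMM_LOCAL groups the ranks as:
!   compute tasks 0-11      -> one color  (the compute communicator)
!   I/O server tasks 12-14  -> a second color (1st server group)
!   I/O server tasks 15-17  -> a third color  (2nd server group)
! (This restates the membership tables above; the actual integer color values
! are an implementation detail.)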
267 USE module_dm, ONLY : compute_mesh
271 INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
272 n_groups_arg, mpi_comm_wrld
273 INTEGER, INTENT(IN) :: nproc_x, nproc_y
274 INTEGER, INTENT(OUT) :: mpi_comm_local, nio
275 INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
277 INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
278 INTEGER, DIMENSION(ntasks) :: icolor
280 INTEGER :: io_form_setting
282 INTEGER :: k, m, nprocx, nprocy
283 LOGICAL :: reorder_mesh
285 !check the namelist and make sure there are no output forms specified
286 !that cannot be quilted
287 CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
288 CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
289 CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
290 CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
291 CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
292 CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
293 CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
294 CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
295 CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
296 CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
297 CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
298 CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
299 CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
301 n_groups = n_groups_arg
302 IF ( n_groups .LT. 1 ) n_groups = 1
304 compute_node = .TRUE.
307 ! nio is the number of I/O tasks per group. If there aren't enough tasks to satisfy
308 ! the requirement that there be at least as many compute tasks as I/O tasks in
309 ! each group, then just print a warning and disable quilting.
312 nio = nio_tasks_per_group
313 ncompute_tasks = ntasks - (nio * n_groups)
314 IF ( ncompute_tasks .LT. nio ) THEN
315 WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
317 ncompute_tasks = ntasks
319 WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
321 CALL wrf_message(mess)
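! Worked example of the bookkeeping above: with ntasks = 18, n_groups = 2 and
! nio_tasks_per_group = 3 (the case shown in the header comment above),
! ncompute_tasks = 18 - 3*2 = 12, which is >= nio, so quilting proceeds.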
323 IF ( nio .LT. 0 ) THEN
326 IF ( nio .EQ. 0 ) THEN
327 quilting_enabled = .FALSE.
328 mpi_comm_local = mpi_comm_wrld
329 mpi_comm_io_groups = mpi_comm_wrld
332 quilting_enabled = .TRUE.
334 ! First construct the local communicators
335 ! prepare to split the communicator by designating compute-only tasks
336 DO i = 1, ncompute_tasks
340 ! and designating the groups of i/o tasks
341 DO i = ncompute_tasks+1, ntasks, nio
347 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
348 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
350 ! Now construct the communicators for the io_groups
351 CALL nl_get_reorder_mesh(1,reorder_mesh)
352 IF ( reorder_mesh ) THEN
353 reorder_mesh = .FALSE.
354 CALL nl_set_reorder_mesh(1,reorder_mesh)
355 CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
357 ! assign the compute tasks to the i/o tasks in full rows
358 IF ( nproc_x .NE. -1 .AND. nproc_y .NE. -1 ) THEN
362 CALL compute_mesh( ncompute_tasks, nprocx, nprocy )
365 nio = min(nio,nprocy)
366 m = mod(nprocy,nio) ! divide up remainder, 1 row per, until gone
369 DO k = 1,nprocy/nio+min(m,1)
378 ! ... and add the io servers as the last task in each group
380 ! TBH: each I/O group will contain only one I/O server
381 DO i = ncompute_tasks+1,ntasks
382 icolor(i) = MPI_UNDEFINED
385 DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
389 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
390 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
391 mpi_comm_io_groups(j),ierr)
395 if(poll_servers) then
397 call wrf_message('Warning: server polling does not work with pnetcdf_quilt. Disabled poll_servers.')
400 if(nio_groups==1) then
402 call wrf_message('Server polling does not work with one io group. Disabled poll_servers.')
408 if(poll_servers) then
409 ! If server polling is enabled, we need to create mpi_comm_avail,
410 ! which contains the monitor process, and the I/O server master process
411 ! for each I/O server group. This will be used in the routines
412 ! wrf_quilt_find_server and wrf_quilt_server_ready to find inactive
413 ! I/O servers for new data handles in get_server_id.
415 ! The "in_avail" flag is set to .true. iff this task is in mpi_comm_avail.
417 call mpi_comm_rank(mpi_comm_wrld,me,ierr)
422 if(wrf_dm_on_monitor()) then
423 in_avail=.true. ! monitor process is in mpi_comm_avail
428 i=ncompute_tasks+j*nio-1
430 in_avail=.true. ! I/O server masters are in mpi_comm_avail
435 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
436 CALL MPI_Comm_split(comdup,icolor(me+1),me, &
439 availrank=MPI_UNDEFINED
441 call mpi_comm_rank(mpi_comm_avail,availrank,ierr)
446 compute_group_master = .FALSE.
447 compute_node = .FALSE.
451 IF ( mytask .LT. ncompute_tasks .OR. & ! I am a compute task
452 (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) & ! I am the I/O server for this group
455 CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
456 ! Get the rank of this compute task in the compute+io
457 ! communicator to which it belongs
458 CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )
460 ! If I am an I/O server for this group then make that group's
461 ! communicator the first element in the mpi_comm_io_groups array
462 ! (I will ignore all of the other elements).
463 IF (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) THEN
464 mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
466 compute_node = .TRUE.
467 ! If I am a compute task, check whether I am the member of my
468 ! group that will communicate things that should be sent just
469 ! once (e.g. commands) to the IO server of my group.
470 compute_group_master(j) = (me .EQ. 0)
472 ! IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
477 END SUBROUTINE setup_quilt_servers
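! A minimal sketch (hypothetical routine, never called) of the
! duplicate-then-split pattern used repeatedly in setup_quilt_servers above:
! the parent communicator is duplicated first, and the duplicate is split by
! an integer color with the caller's original rank as the ordering key, so
! tasks that pass the same color end up in the same sub-communicator with
! their relative order preserved.
SUBROUTINE example_split_by_color( parent_comm, my_color, my_rank, new_comm )
  IMPLICIT NONE
  INTEGER, INTENT(IN)  :: parent_comm  ! communicator to split (e.g. mpi_comm_wrld)
  INTEGER, INTENT(IN)  :: my_color     ! tasks passing the same color share new_comm
  INTEGER, INTENT(IN)  :: my_rank      ! rank in parent_comm, used as the split key
  INTEGER, INTENT(OUT) :: new_comm     ! resulting sub-communicator
  INTEGER :: comdup, ierr
  CALL MPI_Comm_dup( parent_comm, comdup, ierr )
  CALL MPI_Comm_split( comdup, my_color, my_rank, new_comm, ierr )
END SUBROUTINE example_split_by_color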
479 SUBROUTINE sokay ( stream, io_form )
480 USE module_state_description
485 SELECT CASE (io_form)
509 WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
510 CALL wrf_error_fatal(mess)
516 ! I/O server tasks call this routine and remain in it for the rest of the
517 ! model run. I/O servers receive I/O requests from compute tasks and
518 ! perform requested I/O operations by calling package-dependent WRF-specific
519 ! I/O interfaces. Requests are sent in the form of "data headers". Each
520 ! request has a unique "header" message associated with it. For requests that
521 ! contain large amounts of data, the data is appended to the header. See
522 ! file module_internal_header_util.F for detailed descriptions of all
525 ! We wish to be able to link to different packages depending on whether
526 ! the I/O is restart, initial, history, or boundary.
528 USE module_state_description
529 USE module_quilt_outbuf_ops
530 USE module_configure, only : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
533 #include "intio_tags.h"
534 #include "wrf_io_flags.h"
535 TYPE (grid_config_rec_type) :: config_flags
536 INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
538 INTEGER mytask_io_group
539 INTEGER :: nout_set = 0
540 INTEGER :: obufsize, bigbufsize, chunksize, sz
541 REAL, DIMENSION(1) :: dummy
542 INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
543 REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
544 INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
545 CHARACTER (LEN=512) :: CDATA
546 CHARACTER (LEN=80) :: fname
547 INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
548 INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
549 INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
550 INTEGER :: dummybuf(1)
551 INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
552 CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
553 INTEGER, EXTERNAL :: use_package
554 LOGICAL :: stored_write_record, retval
555 INTEGER iii, jjj, vid, CC, DD, dom_id
556 LOGICAL :: call_server_ready
559 character*120 sysline
561 dom_id = 1 ! always a valid assumption for domain id for this netcdf setting
562 CALL model_to_grid_config_rec ( dom_id , model_config_rec , config_flags )
564 ! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
565 ! within the group of I/O servers rather than gathering the data onto the
566 ! root I/O server. Unfortunately, this approach means that we can no longer
567 ! select different I/O layers for use with quilting at run time. ARPDBG.
568 ! This code is sufficiently different that it is kept in the separate
569 ! quilt_pnc() routine.
575 ! Call ext_pkg_ioinit() routines to initialize I/O packages.
578 if ( config_flags%use_netcdf_classic ) SysDepInfo="use_netcdf_classic"
579 CALL ext_ncd_ioinit( SysDepInfo, ierr )
583 CALL ext_int_ioinit( SysDepInfo, ierr )
586 CALL ext_xxx_ioinit( SysDepInfo, ierr)
589 CALL ext_yyy_ioinit( SysDepInfo, ierr)
592 CALL ext_zzz_ioinit( SysDepInfo, ierr)
595 CALL ext_gr1_ioinit( SysDepInfo, ierr)
598 CALL ext_gr2_ioinit( SysDepInfo, ierr)
601 call_server_ready = .true. ! = true when the server is ready for a new file
603 okay_to_commit = .false.
604 stored_write_record = .false.
606 ! get info. about the I/O server group that this I/O server task
608 ! Last task in this I/O server group is the I/O server "root"
609 ! The I/O server "root" actually writes data to disk
610 ! TBH: WARNING: This is also implicit in the call to collect_on_comm().
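! Illustration of the "last task is root" convention: in an
! mpi_comm_io_groups(1) communicator holding N-1 compute clients plus this
! server, the server is rank N-1. That is why the MPI_Reduce of message
! sizes below uses this task's own rank (mytask_io_group) as the root, and
! why reductions across the server group use root ntasks_local_group-1 on
! mpi_comm_local.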
611 CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
612 CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
613 CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
614 CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
616 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
617 IF ( itypesize <= 0 ) THEN
618 CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
621 ! Work out whether this I/O server processor has one fewer associated compute proc than
622 ! the most any processor has. This can happen when the number of I/O tasks does not evenly divide
623 ! the number of compute tasks. This is needed to keep the I/O tasks synchronized on the
624 ! same message when they start communicating to stitch together an output.
626 ! Compute processes associated with this task:
627 CC = ntasks_io_group - 1
628 ! Number of compute tasks per I/O task (less remainder)
629 DD = ncompute_tasks / ntasks_local_group
631 ! CC-DD is 1 on servers with the maximum number of compute clients and
632 ! 0 on servers with one fewer than the maximum.
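! Worked example of the CC/DD bookkeeping: with ncompute_tasks = 14 and
! ntasks_local_group = 4 I/O servers, DD = 14/4 = 3; servers that serve
! 4 compute clients have CC = 4, so CC-DD = 1, while servers that serve
! 3 clients have CC = 3 and CC-DD = 0.  (Numbers chosen for illustration.)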
635 ! infinite loop until shutdown message received
636 ! This is the main request-handling loop. I/O quilt servers stay in this loop
637 ! until the model run ends.
639 DO WHILE (.TRUE.) ! {
642 ! Each I/O server receives requests from its compute tasks. Each request
643 ! is contained in a data header (see module_internal_header_util.F for
644 ! detailed descriptions of data headers).
645 ! Each request is sent in two phases. First, sizes of all messages that
646 ! will be sent from the compute tasks to this I/O server are summed on the
647 ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
648 ! and receives concatenated messages from the compute tasks in it via the
649 ! call to collect_on_comm(). Note that "sizes" are generally expressed in
650 ! *bytes* in this code so conversion to "count" (number of Fortran words) is
651 ! required for Fortran indexing and MPI calls.
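! For example, if the compute tasks report obufsize = 4096 bytes and
! MPI_TYPE_SIZE gives itypesize = 4 bytes per INTEGER, the receive buffer is
! allocated as obuf( (4096+1)/4 ) = obuf(1024) INTEGER words, matching the
! ALLOCATE below.  (Sizes here are illustrative only.)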
654 if(poll_servers .and. call_server_ready) then
655 call_server_ready=.false.
656 ! Send a message to the monitor telling it we're ready
657 ! for a new data handle.
658 call wrf_quilt_server_ready()
661 ! wait for info from compute tasks in the I/O group that we're ready to rock
662 ! obufsize will contain number of *bytes*
664 ! first element of reduced is obufsize, second is DataHandle
665 ! if needed (currently needed only for ioclose).
667 CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
668 obufsize = reduced(1)
669 !CALL end_timing("MPI_Reduce at top of forever loop")
670 !JMDEBUGwrite(0,*)'obufsize = ',obufsize
671 ! Negative obufsize will trigger I/O server exit.
672 IF ( obufsize .LT. 0 ) THEN
673 IF ( obufsize .EQ. -100 ) THEN ! magic number
675 CALL ext_ncd_ioexit( Status )
678 CALL ext_int_ioexit( Status )
681 CALL ext_xxx_ioexit( Status )
684 CALL ext_yyy_ioexit( Status )
687 CALL ext_zzz_ioexit( Status )
690 CALL ext_gr1_ioexit( Status )
693 CALL ext_gr2_ioexit( Status )
695 CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
701 CALL mpi_finalize(ierr)
707 WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
708 CALL wrf_error_fatal(mess)
712 ! CALL start_timing()
713 ! Obufsize of zero signals a close
715 ! Allocate buffer obuf to be big enough for the data the compute tasks
716 ! will send. Note: obuf is size in *bytes* so we need to pare this
717 ! down, since the buffer is INTEGER.
718 IF ( obufsize .GT. 0 ) THEN
719 ALLOCATE( obuf( (obufsize+1)/itypesize ) )
721 ! let's roll; get the data from the compute procs and put in obuf
722 CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
726 ! CALL end_timing( "quilt on server: collecting data from compute procs" )
728 ! Necessarily, the compute processes send the ioclose signal,
729 ! if there is one, after the iosync, which means they
730 ! would stall on the ioclose message waiting for the quilt
731 ! processes if it were handled the way other messages are collected,
732 ! using collect_on_comm. This scheme avoids that, but it needs
733 ! a special signal (obufsize of zero) and the DataHandle
734 ! to be closed. That handle is sent as the second
735 ! word of the io_close message received by the MPI_Reduce above.
736 ! Then a header representing the ioclose message is constructed
737 ! here and handled below as if it had been received from the
738 ! compute processes. The clients (compute processes) must be
739 ! careful to send this correctly (one compute process sends the actual
740 ! handle and everyone else sends a zero, so the result sums to
741 ! the value of the handle).
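! For example, if the file being closed has DataHandle = 3 and there are four
! compute tasks in this group, the contributions to the second element of the
! reduce are 3, 0, 0, 0, so reduced(2) sums to 3 and identifies the handle.
! (Handle value and task count are illustrative only.)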
743 ALLOCATE( obuf( 4096 ) )
744 ! DataHandle is provided as second element of reduced
745 CALL int_gen_handle_header( obuf, obufsize, itypesize, &
746 reduced(2) , int_ioclose )
748 if(poll_servers) then
749 ! Once we're done closing, we need to tell the master
750 ! process that we're ready for more data.
751 call_server_ready=.true.
755 !write(0,*)'calling init_store_piece_of_field'
756 ! Now all messages received from the compute clients are stored in
757 ! obuf. Scan through obuf and extract headers and field data and store in
758 ! internal buffers. The scan is done twice, first to determine sizes of
759 ! internal buffers required for storage of headers and fields and second to
760 ! actually store the headers and fields. This bit of code does not do the
761 ! "quilting" (assembly of patches into full domains). For each field, it
762 ! simply concatenates all received patches for the field into a separate
763 ! internal buffer (i.e. one buffer per field). Quilting is done later by
764 ! routine store_patch_in_outbuf().
765 CALL init_store_piece_of_field
766 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
767 !write(0,*)'mpi_type_size returns ', itypesize
768 ! Scan obuf the first time to calculate the size of the buffer required for
769 ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
773 num_commit_messages = 0
774 num_field_training_msgs = 0
775 DO WHILE ( icurs .lt. obufsize ) ! {
776 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
777 SELECT CASE ( hdr_tag )
779 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
780 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
781 DomainDesc , MemoryOrder , Stagger , DimNames , &
782 DomainStart , DomainEnd , &
783 MemoryStart , MemoryEnd , &
784 PatchStart , PatchEnd )
785 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
786 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
788 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
789 IF ( num_field_training_msgs .EQ. 0 ) THEN
790 call add_to_bufsize_for_field( VarName, hdrbufsize )
791 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
793 num_field_training_msgs = num_field_training_msgs + 1
795 call add_to_bufsize_for_field( VarName, hdrbufsize )
796 !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
798 icurs = icurs + hdrbufsize
800 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
802 ! If this is a real write (i.e. not a training write), accumulate
803 ! buffersize for this field.
804 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
805 !write(0,*) 'X-1a', chunksize, TRIM(VarName)
806 call add_to_bufsize_for_field( VarName, chunksize )
807 icurs = icurs + chunksize
809 CASE ( int_open_for_write_commit ) ! only one per group of tasks
810 hdrbufsize = obuf(icurs/itypesize)
811 IF (num_commit_messages.EQ.0) THEN
812 call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
814 num_commit_messages = num_commit_messages + 1
815 icurs = icurs + hdrbufsize
817 hdrbufsize = obuf(icurs/itypesize)
819 ! This logic and the logic in the loop below are used to determine whether
820 ! to let noop records sent by the compute processes pass
821 ! through. The purpose is to make sure that the communications between this
822 ! server and the other servers in this quilt group stay synchronized in
823 ! the collection loop below, even when the servers are serving different
824 ! numbers of clients. Here are some conditions (a worked illustration follows the list):
826 ! 1. The number of compute clients served will not differ by more than 1
827 ! 2. The servers with +1 number of compute clients begin with task 0
828 ! of mpi_comm_local, the communicator shared by this group of servers
830 ! 3. For each collective field or metadata output from the compute tasks,
831 ! there will be one record sent to the associated i/o server task. The
832 ! i/o server task collects these records and stores them contiguously
833 ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
834 ! server task will contain one record from each associated compute
837 ! 4. In the case of replicated output from the compute tasks
838 ! (e.g. put_dom_ti records and control records like
839 ! open_for_write_commit type records), compute task 0 is the only
840 ! one that sends the record. The other compute tasks send noop
841 ! records. Thus, obuf on server task zero will contain the output
842 ! record from task 0 followed by noop records from the rest of the
843 ! compute tasks associated with task 0. Obuf on the other server
844 ! tasks will contain nothing but noop records.
846 ! 5. The logic below will not allow any noop records from server task 0.
847 ! It allows only one noop record from each of the other server tasks
848 ! in the i/o group. This way, for replicated output, when the records
849 ! are collected on one server task below, using collect_on_comm on
850 ! mpi_comm_local, each task will provide exactly one record for each
851 ! call to collect_on_comm: 1 bona fide output record from server task
852 ! 0 and noops from the rest.
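! As a concrete illustration of points 4 and 5: suppose two servers share
! mpi_comm_local, server 0 serving compute tasks 0,3,6,9 and server 1 serving
! tasks 1,4,7,10. For a replicated record (e.g. a put_dom_ti), only compute
! task 0 sends the real record, so server 0's obuf holds that record plus
! noops, while server 1's obuf holds only noops. Exactly one noop is let
! through on server 1, so each server contributes one record per
! collect_on_comm call on mpi_comm_local.  (Task numbering is illustrative.)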
854 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
855 .OR.hdr_tag.NE.int_noop) THEN
856 write(VarName,'(I5.5)')vid
857 !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
858 call add_to_bufsize_for_field( VarName, hdrbufsize )
861 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
862 icurs = icurs + hdrbufsize
865 ! Store the headers and field data in internal buffers. The first call to
866 ! store_piece_of_field() allocates internal buffers using sizes computed by
867 ! calls to add_to_bufsize_for_field().
871 num_commit_messages = 0
872 num_field_training_msgs = 0
873 DO WHILE ( icurs .lt. obufsize ) !{
874 !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
875 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
876 SELECT CASE ( hdr_tag )
878 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
879 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
880 DomainDesc , MemoryOrder , Stagger , DimNames , &
881 DomainStart , DomainEnd , &
882 MemoryStart , MemoryEnd , &
883 PatchStart , PatchEnd )
884 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
885 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
887 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
888 IF ( num_field_training_msgs .EQ. 0 ) THEN
889 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
890 !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
892 num_field_training_msgs = num_field_training_msgs + 1
894 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
895 !write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
897 icurs = icurs + hdrbufsize
898 ! If this is a real write (i.e. not a training write), store
899 ! this piece of this field.
900 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
901 !write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
902 call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
903 icurs = icurs + chunksize
905 CASE ( int_open_for_write_commit ) ! only one per group of tasks
906 hdrbufsize = obuf(icurs/itypesize)
907 IF (num_commit_messages.EQ.0) THEN
908 call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
910 num_commit_messages = num_commit_messages + 1
911 icurs = icurs + hdrbufsize
913 hdrbufsize = obuf(icurs/itypesize)
914 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
915 .OR.hdr_tag.NE.int_noop) THEN
916 write(VarName,'(I5.5)')vid
917 !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
918 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
921 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
922 icurs = icurs + hdrbufsize
926 ! Now, for each field, retrieve headers and patches (data) from the internal
927 ! buffers and collect them all on the I/O quilt server "root" task.
928 CALL init_retrieve_pieces_of_field
929 ! Retrieve header and all patches for the first field from the internal
931 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
932 ! Sum sizes of all headers and patches (data) for this field from all I/O
933 ! servers in this I/O server group onto the I/O server "root".
934 CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER, MPI_SUM, ntasks_local_group-1, mpi_comm_local, ierr )
935 !write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
937 ! Loop until there are no more fields to retrieve from the internal buffers.
938 DO WHILE ( retval ) !{
942 ! I/O server "root" allocates space to collect headers and fields from all
943 ! other servers in this I/O server group.
944 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
945 ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
947 ALLOCATE( bigbuf(1) )
950 ! Collect buffers and fields from all I/O servers in this I/O server group
951 ! onto the I/O server "root"
952 CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName), &
953 get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), &
958 ! The I/O server "root" now handles collected requests from all compute
959 ! tasks served by this I/O server group (i.e. all compute tasks).
960 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
962 !do iii = 1, ntasks_local_group
963 ! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
964 ! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
967 icurs = itypesize ! icurs is a byte counter, but buffer is integer
969 stored_write_record = .false.
971 ! The I/O server "root" loops over the collected requests.
972 DO WHILE ( icurs .lt. bigbufsize ) !{
973 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
975 !write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
976 ! The I/O server "root" gets the request out of the next header and
977 ! handles it by, in most cases, calling the appropriate external I/O package
979 SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
980 ! The I/O server "root" handles the "noop" (do nothing) request. This is
981 ! actually quite easy. "Noop" requests exist to help avoid race conditions.
982 ! In some cases, only one compute task will know everything about a request so
983 ! other compute tasks send "noop" requests.
985 CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
986 icurs = icurs + hdrbufsize
988 ! The I/O server "root" handles the "put_dom_td_real" request.
989 CASE ( int_dom_td_real )
990 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
991 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
992 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
993 DataHandle, DateStr, Element, RData, Count, code )
994 icurs = icurs + hdrbufsize
996 SELECT CASE (use_package(io_form(DataHandle)))
999 CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1003 CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1007 CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1011 CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1015 CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1022 ! The I/O server "root" handles the "put_dom_ti_real" request.
1023 CASE ( int_dom_ti_real )
1024 !write(0,*)' int_dom_ti_real '
1025 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1026 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
1027 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1028 DataHandle, Element, RData, Count, code )
1029 icurs = icurs + hdrbufsize
1031 SELECT CASE (use_package(io_form(DataHandle)))
1034 CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1035 !write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
1039 CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1043 CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1047 CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1051 CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1059 ! The I/O server "root" handles the "put_dom_td_integer" request.
1060 CASE ( int_dom_td_integer )
1061 !write(0,*)' int_dom_td_integer '
1062 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1063 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
1064 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1065 DataHandle, DateStr, Element, IData, Count, code )
1066 icurs = icurs + hdrbufsize
1068 SELECT CASE (use_package(io_form(DataHandle)))
1071 CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1075 CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1079 CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1083 CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1087 CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1095 ! The I/O server "root" handles the "put_dom_ti_integer" request.
1096 CASE ( int_dom_ti_integer )
1097 !write(0,*)' int_dom_ti_integer '
1099 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1100 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
1101 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1102 DataHandle, Element, IData, Count, code )
1103 icurs = icurs + hdrbufsize
1104 SELECT CASE (use_package(io_form(DataHandle)))
1107 CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1108 !write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
1112 CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1116 CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1120 CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1124 CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1133 ! The I/O server "root" handles the "set_time" request.
1134 CASE ( int_set_time )
1135 !write(0,*)' int_set_time '
1136 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1137 DataHandle, Element, VarName, CData, code )
1138 SELECT CASE (use_package(io_form(DataHandle)))
1141 CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
1147 icurs = icurs + hdrbufsize
1149 ! The I/O server "root" handles the "put_dom_ti_char" request.
1150 CASE ( int_dom_ti_char )
1151 !write(0,*)' before int_get_ti_header_char '
1152 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1153 DataHandle, Element, VarName, CData, code )
1154 !write(0,*)' after int_get_ti_header_char ',VarName
1156 SELECT CASE (use_package(io_form(DataHandle)))
1159 CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1163 CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1167 CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1171 CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1175 CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
1181 icurs = icurs + hdrbufsize
1183 ! The I/O server "root" handles the "put_var_ti_char" request.
1184 CASE ( int_var_ti_char )
1185 !write(0,*)' int_var_ti_char '
1186 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1187 DataHandle, Element, VarName, CData, code )
1189 SELECT CASE (use_package(io_form(DataHandle)))
1192 CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1196 CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1200 CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1204 CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1208 CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1214 icurs = icurs + hdrbufsize
1217 ! ioexit is now handled by sending negative message length to server
1218 CALL wrf_error_fatal( &
1219 "quilt: should have handled int_ioexit already")
1220 ! The I/O server "root" handles the "ioclose" request.
1221 CASE ( int_ioclose )
1222 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1224 icurs = icurs + hdrbufsize
1226 IF ( DataHandle .GE. 1 ) THEN
1228 SELECT CASE (use_package(io_form(DataHandle)))
1231 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1232 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1233 CALL ext_ncd_ioclose(handle(DataHandle),Status)
1238 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
1239 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1240 CALL ext_pnc_ioclose(handle(DataHandle),Status)
1245 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1246 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1247 CALL ext_int_ioclose(handle(DataHandle),Status)
1252 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1253 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1254 CALL ext_yyy_ioclose(handle(DataHandle),Status)
1259 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1260 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1261 CALL ext_gr1_ioclose(handle(DataHandle),Status)
1266 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1267 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1268 CALL ext_gr2_ioclose(handle(DataHandle),Status)
1276 ! If desired, output a ready flag after the quilting subroutine closes the data handle for the history (wrfout) file.
1278 IF (fname(1:6) .EQ. 'wrfout' .AND. config_flags%output_ready_flag ) THEN
1279 OPEN (unit=99,file='wrfoutReady' // fname(7:30), status='unknown', access='sequential')
1283 ! The I/O server "root" handles the "open_for_write_begin" request.
1284 CASE ( int_open_for_write_begin )
1286 CALL int_get_ofwb_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1287 FileName,SysDepInfo,io_form_arg,DataHandle )
1289 !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
1290 !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
1291 !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
1292 !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
1293 icurs = icurs + hdrbufsize
1294 !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
1296 io_form(DataHandle) = io_form_arg
1298 SELECT CASE (use_package(io_form(DataHandle)))
1301 CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1302 !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
1306 CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1310 CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1314 CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1318 CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1324 okay_to_write(DataHandle) = .false.
1326 ! The I/O server "root" handles the "open_for_write_commit" request.
1327 ! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
1328 ! requests will initiate writes to disk. Actual commit will be done after
1329 ! all requests in this batch have been handled.
1330 CASE ( int_open_for_write_commit )
1332 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1334 icurs = icurs + hdrbufsize
1335 okay_to_commit(DataHandle) = .true.
1337 ! The I/O server "root" handles the "write_field" (int_field) request.
1338 ! If okay_to_write(DataHandle) is .true. then the patch in the
1339 ! header (bigbuf) is written to a globally-sized internal output buffer via
1340 ! the call to store_patch_in_outbuf(). Note that this is where the actual
1341 ! "quilting" (reassembly of patches onto a full-size domain) is done. If
1342 ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
1343 ! are called to write metadata for I/O formats that support native metadata.
1345 ! NOTE that the I/O server "root" will only see write_field (int_field)
1346 ! requests AFTER an "iosync" request.
1348 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1349 CALL int_get_write_field_header ( bigbuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1350 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1351 DomainDesc , MemoryOrder , Stagger , DimNames , &
1352 DomainStart , DomainEnd , &
1353 MemoryStart , MemoryEnd , &
1354 PatchStart , PatchEnd )
1355 !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
1356 icurs = icurs + hdrbufsize
1358 IF ( okay_to_write(DataHandle) ) THEN
1360 ! WRITE(0,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
1361 ! (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)
1363 IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
1364 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
1365 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
1366 IF ( FieldType .EQ. WRF_DOUBLE) THEN
1367 ! this branch has not been tested TBH: 20050406
1368 CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
1370 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1372 stored_write_record = .true.
1373 CALL store_patch_in_outbuf ( bigbuf(icurs/itypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
1374 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1375 DomainStart , DomainEnd , &
1376 MemoryStart , MemoryEnd , &
1377 PatchStart , PatchEnd )
1379 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
1380 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1381 stored_write_record = .true.
1382 CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/itypesize), TRIM(DateStr), TRIM(VarName) , &
1383 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1384 DomainStart , DomainEnd , &
1385 MemoryStart , MemoryEnd , &
1386 PatchStart , PatchEnd )
1387 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
1388 ftypesize = LWORDSIZE
1390 icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1391 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1393 SELECT CASE (use_package(io_form(DataHandle)))
1396 CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1397 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1398 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1399 DomainStart , DomainEnd , &
1400 DomainStart , DomainEnd , &
1401 DomainStart , DomainEnd , &
1405 ! since this is training and the grib output doesn't need training, disable this branch.
1408 CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1409 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1410 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1411 DomainStart , DomainEnd , &
1412 DomainStart , DomainEnd , &
1413 DomainStart , DomainEnd , &
1422 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1424 icurs = icurs + hdrbufsize
1426 WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/itypesize) ),' icurs ',icurs/itypesize
1427 CALL wrf_error_fatal( mess )
1431 ! Now, the I/O server "root" has finished handling all commands from the latest
1432 ! call to retrieve_pieces_of_field().
1434 IF (stored_write_record) THEN
1435 ! If any fields have been stored in a globally-sized internal output buffer
1436 ! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
1438 ! NOTE that the I/O server "root" will only have called
1439 ! store_patch_in_outbuf() when handling write_field (int_field)
1440 ! commands which only arrive AFTER an "iosync" command.
1442 CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
1443 ! CALL end_timing( "quilt: call to write_outbuf" )
1446 ! If one or more "open_for_write_commit" commands were encountered from the
1447 ! latest call to retrieve_pieces_of_field() then call the package-specific
1448 ! routine to do the commit.
1449 IF (okay_to_commit(DataHandle)) THEN
1451 SELECT CASE (use_package(io_form(DataHandle)))
1454 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1455 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1456 CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
1457 okay_to_write(DataHandle) = .true.
1462 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1463 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1464 CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
1465 okay_to_write(DataHandle) = .true.
1470 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1471 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1472 CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
1473 okay_to_write(DataHandle) = .true.
1478 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1479 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1480 CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
1481 okay_to_write(DataHandle) = .true.
1486 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1487 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1488 CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
1489 okay_to_write(DataHandle) = .true.
1497 okay_to_commit(DataHandle) = .false.
1499 DEALLOCATE( bigbuf )
1502 if(allocated(bigbuf)) deallocate(bigbuf)
1503 ! Retrieve header and all patches for the next field from the internal
1505 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1506 ! Sum sizes of all headers and patches (data) for this field from all I/O
1507 ! servers in this I/O server group onto the I/O server "root".
1508 CALL mpi_x_reduce( sz, bigbufsize, 1, MPI_INTEGER,MPI_SUM, ntasks_local_group-1,mpi_comm_local, ierr )
1509 ! Then, return to the top of the loop to collect headers and data from all
1510 ! I/O servers in this I/O server group onto the I/O server "root" and handle
1511 ! the next batch of commands.
1516 ! flush output files if needed
1517 IF (stored_write_record) THEN
1518 ! CALL start_timing()
1519 SELECT CASE ( use_package(io_form) )
1522 CALL ext_ncd_iosync( handle(DataHandle), Status )
1526 CALL ext_xxx_iosync( handle(DataHandle), Status )
1530 CALL ext_yyy_iosync( handle(DataHandle), Status )
1534 CALL ext_zzz_iosync( handle(DataHandle), Status )
1538 CALL ext_gr1_iosync( handle(DataHandle), Status )
1542 CALL ext_gr2_iosync( handle(DataHandle), Status )
1546 CALL ext_int_iosync( handle(DataHandle), Status )
1551 !CALL end_timing( "quilt: flush" )
1556 END SUBROUTINE quilt
1558 SUBROUTINE quilt_pnc
1560 ! Same as quilt() routine except that _all_ of the IO servers that call it
1561 ! actually write data to disk using pNetCDF. This version is only used when
1562 ! the code is compiled with PNETCDF_QUILT defined.
1564 USE module_state_description
1565 USE module_quilt_outbuf_ops
1568 #include "intio_tags.h"
1569 #include "wrf_io_flags.h"
1570 INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
1572 INTEGER mytask_io_group
1573 INTEGER :: nout_set = 0
1574 INTEGER :: obufsize, bigbufsize, chunksize, sz
1575 REAL, DIMENSION(1) :: dummy
1576 INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
1577 REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
1578 INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
1579 CHARACTER (LEN=512) :: CDATA
1580 CHARACTER (LEN=80) :: fname
1581 INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
1582 INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
1583 INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
1584 INTEGER :: dummybuf(1)
1585 INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
1586 CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
1587 INTEGER, EXTERNAL :: use_package
1588 LOGICAL :: stored_write_record, retval, written_record
1589 INTEGER iii, jjj, vid, CC, DD
1592 ! character*120 sysline
1594 ! Call ext_pkg_ioinit() routines to initialize I/O packages.
1597 CALL ext_ncd_ioinit( SysDepInfo, ierr)
1599 #ifdef PNETCDF_QUILT
1600 CALL ext_pnc_ioinit( SysDepInfo, ierr)
1603 CALL ext_int_ioinit( SysDepInfo, ierr )
1606 CALL ext_xxx_ioinit( SysDepInfo, ierr)
1609 CALL ext_yyy_ioinit( SysDepInfo, ierr)
1612 CALL ext_zzz_ioinit( SysDepInfo, ierr)
1615 CALL ext_gr1_ioinit( SysDepInfo, ierr)
1618 CALL ext_gr2_ioinit( SysDepInfo, ierr)
1621 okay_to_commit = .false.
1622 stored_write_record = .false.
1624 ! get info. about the I/O server group that this I/O server task
1626 CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
1627 CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
1628 CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
1629 CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
1631 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1632 IF ( itypesize <= 0 ) THEN
1633 CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
1636 ! Work out whether this I/O server processor has one fewer associated compute proc than
1637 ! the most any processor has. This can happen when the number of I/O tasks does not evenly divide
1638 ! the number of compute tasks. This is needed to keep the I/O tasks synchronized on the
1639 ! same message when they start communicating to stitch together an output.
1641 ! Compute processes associated with this task:
1642 CC = ntasks_io_group - 1
1643 ! Number of compute tasks per I/O task (less remainder)
1644 DD = ncompute_tasks / ntasks_local_group
1646 ! CC-DD is 1 on servers with the maximum number of compute clients and
1647 ! 0 on servers with one fewer than the maximum.
1650 ! infinite loop until shutdown message received
1651 ! This is the main request-handling loop. I/O quilt servers stay in this loop
1652 ! until the model run ends.
1653 !okay_to_w = .false.
1654 DO WHILE (.TRUE.) ! {
1657 ! Each I/O server receives requests from its compute tasks. Each request
1658 ! is contained in a data header (see module_internal_header_util.F for
1659 ! detailed descriptions of data headers).
1660 ! Each request is sent in two phases. First, sizes of all messages that
1661 ! will be sent from the compute tasks to this I/O server are summed on the
1662 ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
1663 ! and receives concatenated messages from the compute tasks in it via the
1664 ! call to collect_on_comm(). Note that "sizes" are generally expressed in
1665 ! *bytes* in this code so conversion to "count" (number of Fortran words) is
1666 ! required for Fortran indexing and MPI calls.
1668 ! wait for info from compute tasks in the I/O group that we're ready to rock
1669 ! obufsize will contain number of *bytes*
1671 ! first element of reduced is obufsize, second is DataHandle
1672 ! if needed (currently needed only for ioclose).
1674 CALL mpi_x_reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, mytask_io_group, mpi_comm_io_groups(1), ierr )
1675 obufsize = reduced(1)
1676 !CALL end_timing("MPI_Reduce at top of forever loop")
1677 !JMDEBUGwrite(0,*)'obufsize = ',obufsize
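! For reference, the matching client-side pattern (used by the wrf_quilt_*
! routines later in this file) looks roughly like:
!
!   reduced = 0
!   reduced(1) = hdrbufsize                         ! bytes this client will send
!   IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
!   CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, &
!                      tasks_in_group-1, comm_io_group, ierr )
!   CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
!                              hdrbuf, hdrbufsize, ... )   ! then ship the data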
1678 ! Negative obufsize will trigger I/O server exit.
1679 IF ( obufsize .LT. 0 ) THEN
1680 IF ( obufsize .EQ. -100 ) THEN ! magic number
1682 CALL ext_ncd_ioexit( Status )
1684 #ifdef PNETCDF_QUILT
1685 CALL ext_pnc_ioexit( Status )
1688 CALL ext_int_ioexit( Status )
1691 CALL ext_xxx_ioexit( Status )
1694 CALL ext_yyy_ioexit( Status )
1697 CALL ext_zzz_ioexit( Status )
1700 CALL ext_gr1_ioexit( Status )
1703 CALL ext_gr2_ioexit( Status )
1705 CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
1706 CALL mpi_finalize(ierr)
1709 WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
1710 CALL wrf_error_fatal(mess)
1715 ! Obufsize of zero signals a close
1717 ! Allocate buffer obuf to be big enough for the data the compute tasks
1718 ! will send. Note: obufsize is in *bytes*, so we scale it down to a
1719 ! word count, since the buffer is INTEGER.
1720 IF ( obufsize .GT. 0 ) THEN
1721 ALLOCATE( obuf( (obufsize+1)/itypesize ) )
1723 ! let's roll; get the data from the compute procs and put in obuf
1724 CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
1728 ! CALL end_timing( "quilt on server: collecting data from compute procs" )
1730 ! Necessarily, the compute processes send the ioclose signal,
1731 ! if there is one, after the iosync, which means they
1732 ! will stall on the ioclose message waiting for the quilt
1733 ! processes if we handled it the way other messages are collected,
1734 ! using collect_on_comm. This approach avoids that, but we need
1735 ! a special signal (obufsize zero) and the DataHandle
1736 ! to be closed. That handle is sent as the second
1737 ! word of the io_close message received by the MPI_Reduce above.
1738 ! Then a header representing the ioclose message is constructed
1739 ! here and handled below as if it were received from the
1740 ! compute processes. The clients (compute processes) must be
1741 ! careful to send this correctly (one compute process sends the actual
1742 ! handle and everyone else sends a zero, so the result sums to
1743 ! the value of the handle).
1745 ALLOCATE( obuf( 4096 ) )
1746 ! DataHandle is provided as second element of reduced
1747 CALL int_gen_handle_header( obuf, obufsize, itypesize, &
1748 reduced(2) , int_ioclose )
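! Client-side convention that makes reduced(2) meaningful here (see
! wrf_quilt_ioclose later in this file): exactly one compute task contributes
! the real handle, so the MPI_SUM received above equals that handle:
!
!   IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle   ! one task sends the handle,
!                                                        ! the rest contribute zero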
1751 !write(0,*)'calling init_store_piece_of_field'
1752 ! Now all messages received from the compute clients are stored in
1753 ! obuf. Scan through obuf and extract headers and field data and store in
1754 ! internal buffers. The scan is done twice, first to determine sizes of
1755 ! internal buffers required for storage of headers and fields and second to
1756 ! actually store the headers and fields. This bit of code does not do any
1757 ! "quilting" (assembly of patches into full domains). For each field, it
1758 ! simply writes all received patches for the field to disk.
1759 ! ARPDBG we can vastly reduce the number of writes to disk by stitching
1760 ! any contiguous patches together first. Has implications for synchronisation
1761 ! of pNetCDF calls though.
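! Outline of the flow implemented below (the names are the helpers actually
! called in this routine):
!
!   CALL init_store_piece_of_field
!   ! pass 1 over obuf: CALL add_to_bufsize_for_field( VarName, nbytes ) per record
!   ! pass 2 over obuf: CALL store_piece_of_field( obuf(...), VarName, nbytes ) per record
!   CALL init_retrieve_pieces_of_field
!   ! then repeatedly: CALL retrieve_pieces_of_field( obuf, VarName, obufsize, sz, retval )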
1762 CALL init_store_piece_of_field
1763 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
1764 !write(0,*)'mpi_type_size returns ', itypesize
1765 ! Scan obuf the first time to calculate the size of the buffer required for
1766 ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
1770 num_commit_messages = 0
1771 num_field_training_msgs = 0
1772 DO WHILE ( icurs .lt. obufsize ) ! {
1773 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1774 SELECT CASE ( hdr_tag )
1776 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1777 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1778 DomainDesc , MemoryOrder , Stagger , DimNames , &
1779 DomainStart , DomainEnd , &
1780 MemoryStart , MemoryEnd , &
1781 PatchStart , PatchEnd )
1782 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1783 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1785 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
1786 IF ( num_field_training_msgs .EQ. 0 ) THEN
1787 call add_to_bufsize_for_field( VarName, hdrbufsize )
1788 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1790 num_field_training_msgs = num_field_training_msgs + 1
1792 call add_to_bufsize_for_field( VarName, hdrbufsize )
1793 !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1795 icurs = icurs + hdrbufsize
1797 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1799 ! If this is a real write (i.e. not a training write), accumulate
1800 ! buffersize for this field.
1801 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
1802 !write(0,*) 'X-1a', chunksize, TRIM(VarName)
1803 call add_to_bufsize_for_field( VarName, chunksize )
1804 icurs = icurs + chunksize
1806 CASE ( int_open_for_write_commit ) ! only one per group of tasks
1807 hdrbufsize = obuf(icurs/itypesize)
1808 IF (num_commit_messages.EQ.0) THEN
1809 call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
1811 num_commit_messages = num_commit_messages + 1
1812 icurs = icurs + hdrbufsize
1814 hdrbufsize = obuf(icurs/itypesize)
1816 ! This logic, and the logic in the loop below, is used to determine whether
1817 ! to allow noop records sent by the compute processes to go
1818 ! through. The purpose is to make sure that the communications between this
1819 ! server and the other servers in this quilt group stay synchronized in
1820 ! the collection loop below, even when the servers are serving different
1821 ! numbers of clients. Here are some conditions:
1823 ! 1. The number of compute clients served will not differ by more than 1
1824 ! 2. The servers serving one extra compute client begin with task 0
1825 ! of mpi_comm_local, the communicator shared by this group of servers
1827 ! 3. For each collective field or metadata output from the compute tasks,
1828 ! there will be one record sent to the associated i/o server task. The
1829 ! i/o server task collects these records and stores them contiguously
1830 ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
1831 ! server task will contain one record from each associated compute
1834 ! 4. In the case of replicated output from the compute tasks
1835 ! (e.g. put_dom_ti records and control records like
1836 ! open_for_write_commit type records), only compute tasks for which
1837 ! (compute_group_master == .TRUE.) send the record. The other compute
1838 ! tasks send noop records. This is done so that each server task
1839 ! receives exactly one record plus noops from the other compute tasks.
1841 ! 5. Logic below does not allow any noop records through since each IO
1842 ! server task now receives a valid record (from the 'compute-group master')
1843 ! when doing replicated output.
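! Client-side sketch of condition 4 (the same pattern appears in the
! wrf_quilt_* routines later in this file):
!
!   IF ( wrf_dm_on_monitor() ) THEN        ! or compute_group_master(1) with pNetCDF
!     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
!                                 DataHandle, int_open_for_write_commit )
!   ELSE
!     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
!   ENDIF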
1844 IF (hdr_tag.NE.int_noop) THEN
1845 write(VarName,'(I5.5)')vid
1846 !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1847 call add_to_bufsize_for_field( VarName, hdrbufsize )
1850 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1851 icurs = icurs + hdrbufsize
1855 ! Store the headers and field data in internal buffers. The first call to
1856 ! store_piece_of_field() allocates internal buffers using sizes computed by
1857 ! calls to add_to_bufsize_for_field().
1861 num_commit_messages = 0
1862 num_field_training_msgs = 0
1863 DO WHILE ( icurs .lt. obufsize ) !{
1864 !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
1865 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1866 SELECT CASE ( hdr_tag )
1868 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1869 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1870 DomainDesc , MemoryOrder , Stagger , DimNames , &
1871 DomainStart , DomainEnd , &
1872 MemoryStart , MemoryEnd , &
1873 PatchStart , PatchEnd )
1874 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1875 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1877 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
1878 IF ( num_field_training_msgs .EQ. 0 ) THEN
1879 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1880 !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1882 num_field_training_msgs = num_field_training_msgs + 1
1884 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1885 !write(0,*) 'A-2a', icurs, hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1887 icurs = icurs + hdrbufsize
1888 ! If this is a real write (i.e. not a training write), store
1889 ! this piece of this field.
1890 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
1891 call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
1892 icurs = icurs + chunksize
1893 !write(0,*) 'A-1a',TRIM(VarName),' icurs ',icurs,PatchStart(1:3),PatchEnd(1:3)
1895 CASE ( int_open_for_write_commit ) ! only one per group of tasks
1896 hdrbufsize = obuf(icurs/itypesize)
1897 IF (num_commit_messages.EQ.0) THEN
1898 call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
1900 num_commit_messages = num_commit_messages + 1
1901 icurs = icurs + hdrbufsize
1903 hdrbufsize = obuf(icurs/itypesize)
1904 IF (hdr_tag.NE.int_noop) THEN
1906 write(VarName,'(I5.5)')vid
1907 !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1908 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1911 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1912 icurs = icurs + hdrbufsize
1914 ENDDO !} while(icurs < obufsize)
1916 ! Now, for each field, retrieve headers and patches (data) from the internal
1918 CALL init_retrieve_pieces_of_field
1919 ! Retrieve header and all patches for the first field from the internal
1921 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1922 written_record = .false.
1924 ! Loop until there are no more fields to retrieve from the internal buffers.
1925 DO WHILE ( retval ) !{
1927 ! This I/O server now handles the collected requests from the compute
1930 icurs = itypesize ! icurs is a byte counter, but buffer is integer
1932 stored_write_record = .false.
1934 ! ALL I/O servers in this group loop over the collected requests they have
1936 DO WHILE ( icurs .lt. sz)! bigbufsize ) !{
1938 ! The I/O server gets the request out of the next header and
1939 ! handles it by, in most cases, calling the appropriate external I/O package interface.
1941 !write(0,*)__FILE__,__LINE__,'get_hdr_tag ',icurs,sz,get_hdr_tag( obuf(icurs/itypesize) )
1942 SELECT CASE ( get_hdr_tag( obuf(icurs/itypesize) ) )
1943 ! The I/O server handles the "noop" (do nothing) request. This is
1944 ! actually quite easy. "Noop" requests exist to help avoid race conditions.
1946 CALL int_get_noop_header( obuf(icurs/itypesize), &
1947 hdrbufsize, itypesize )
1948 icurs = icurs + hdrbufsize
1950 ! The I/O server "root" handles the "put_dom_td_real" request.
1951 CASE ( int_dom_td_real )
1952 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1953 ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
1954 CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1955 DataHandle, DateStr, Element, RData, Count, code )
1956 icurs = icurs + hdrbufsize
1958 SELECT CASE (use_package(io_form(DataHandle)))
1959 #ifdef PNETCDF_QUILT
1961 CALL ext_pnc_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1965 CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1969 CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1973 CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1977 CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1981 CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1988 ! Every I/O server handles the "put_dom_ti_real" request.
1989 CASE ( int_dom_ti_real )
1991 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1992 ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
1993 CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1994 DataHandle, Element, RData, Count, code )
1995 icurs = icurs + hdrbufsize
1997 SELECT CASE (use_package(io_form(DataHandle)))
1998 #ifdef PNETCDF_QUILT
2000 CALL ext_pnc_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2004 CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2008 CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2012 CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2016 CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2020 CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
2028 ! Every I/O server handles the "put_dom_td_integer" request.
2029 CASE ( int_dom_td_integer )
2031 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2032 ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
2033 CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
2034 DataHandle, DateStr, Element, IData, Count, code )
2035 icurs = icurs + hdrbufsize
2037 SELECT CASE (use_package(io_form(DataHandle)))
2038 #ifdef PNETCDF_QUILT
2040 CALL ext_pnc_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2044 CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2048 CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2052 CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2056 CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2060 CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
2068 ! Every I/O server handles the "put_dom_ti_integer" request.
2069 CASE ( int_dom_ti_integer )
2071 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2072 ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
2073 CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
2074 DataHandle, Element, IData, Count, code )
2075 icurs = icurs + hdrbufsize
2076 SELECT CASE (use_package(io_form(DataHandle)))
2077 #ifdef PNETCDF_QUILT
2079 CALL ext_pnc_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2083 CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2087 CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2091 CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2095 CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2099 CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
2108 ! Every I/O server handles the "set_time" request.
2109 CASE ( int_set_time )
2111 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2112 DataHandle, Element, VarName, CData, code )
2113 SELECT CASE (use_package(io_form(DataHandle)))
2116 CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
2122 icurs = icurs + hdrbufsize
2124 ! Every I/O server handles the "put_dom_ti_char" request.
2125 CASE ( int_dom_ti_char )
2127 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2128 DataHandle, Element, VarName, CData, code )
2130 SELECT CASE (use_package(io_form(DataHandle)))
2131 #ifdef PNETCDF_QUILT
2133 CALL ext_pnc_put_dom_ti_char ( handle(DataHandle), TRIM(Element), Trim(CData), Status)
2137 CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2141 CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2145 CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2149 CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2153 CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2159 icurs = icurs + hdrbufsize
2161 ! Every I/O server handles the "put_var_ti_char" request.
2162 CASE ( int_var_ti_char )
2164 CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2165 DataHandle, Element, VarName, CData, code )
2167 SELECT CASE (use_package(io_form(DataHandle)))
2168 #ifdef PNETCDF_QUILT
2170 CALL ext_pnc_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status )
2174 CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2178 CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2182 CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2186 CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2190 CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2196 icurs = icurs + hdrbufsize
2199 ! ioexit is now handled by sending negative message length to server
2200 CALL wrf_error_fatal( &
2201 "quilt: should have handled int_ioexit already")
2202 ! Every I/O server handles the "ioclose" request.
2203 CASE ( int_ioclose )
2204 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2206 icurs = icurs + hdrbufsize
2208 IF ( DataHandle .GE. 1 ) THEN
2210 SELECT CASE (use_package(io_form(DataHandle)))
2211 #ifdef PNETCDF_QUILT
2213 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2214 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2215 CALL ext_pnc_ioclose(handle(DataHandle),Status)
2220 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2221 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2222 CALL ext_ncd_ioclose(handle(DataHandle),Status)
2227 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2228 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2229 CALL ext_int_ioclose(handle(DataHandle),Status)
2234 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2235 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2236 CALL ext_yyy_ioclose(handle(DataHandle),Status)
2241 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2242 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2243 CALL ext_gr1_ioclose(handle(DataHandle),Status)
2248 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2249 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2250 CALL ext_gr2_ioclose(handle(DataHandle),Status)
2258 ! Every I/O server handles the "open_for_write_begin" request.
2259 CASE ( int_open_for_write_begin )
2261 CALL int_get_ofwb_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2262 FileName,SysDepInfo,io_form_arg,DataHandle )
2264 !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
2265 !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
2266 !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
2267 !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
2268 icurs = icurs + hdrbufsize
2269 !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
2271 io_form(DataHandle) = io_form_arg
2273 SELECT CASE (use_package(io_form(DataHandle)))
2274 #ifdef PNETCDF_QUILT
2276 CALL ext_pnc_open_for_write_begin(FileName,mpi_comm_local,mpi_comm_local,SysDepInfo,handle(DataHandle),Status )
2280 CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2281 !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
2285 CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2289 CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2293 CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2297 CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2303 okay_to_write(DataHandle) = .false.
2305 ! Every I/O server handles the "open_for_write_commit" request.
2306 ! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
2307 ! (int_field) requests will initiate writes to disk. Actual commit will be done after
2308 ! all requests in this batch have been handled.
2309 CASE ( int_open_for_write_commit )
2311 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2313 icurs = icurs + hdrbufsize
2314 okay_to_commit(DataHandle) = .true.
2316 ! Every I/O server handles the "write_field" (int_field) request.
2317 ! If okay_to_write(DataHandle) is .true. then the patch described by the
2318 ! header is written to disk from obuf using pNetCDF. Note that this is where the actual
2319 ! "quilting" (reassembly of patches onto a full-size domain) is done. If
2320 ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
2321 ! are called to write metadata for I/O formats that support native metadata.
2323 ! NOTE that the I/O servers will only see write_field (int_field)
2324 ! requests AFTER an "iosync" request.
2326 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2327 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
2328 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
2329 DomainDesc , MemoryOrder , Stagger , DimNames , &
2330 DomainStart , DomainEnd , &
2331 MemoryStart , MemoryEnd , &
2332 PatchStart , PatchEnd )
2333 !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
2334 icurs = icurs + hdrbufsize
2336 IF ( okay_to_write(DataHandle) ) THEN
2338 !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,A2,I6,1x,3('[',I3,',',I3,'] '))") &
2339 !!$ TRIM(DateStr), TRIM(VarName), TRIM(MemoryOrder), &
2340 !!$ (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1), &
2341 !!$PatchStart(1),PatchEnd(1),PatchStart(2),PatchEnd(2),PatchStart(3),PatchEnd(3)
2342 !!$ WRITE(0,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2343 !!$ TRIM(DateStr), TRIM(VarName), DomainDesc, &
2344 !!$ DomainStart(1),DomainEnd(1),DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2346 IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
2347 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
2348 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
2349 IF ( FieldType .EQ. WRF_DOUBLE) THEN
2350 ! this branch has not been tested TBH: 20050406
2351 CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
2353 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
2356 #ifdef PNETCDF_QUILT
2357 ! WRITE(mess,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2358 ! TRIM(DateStr), TRIM(VarName), DomainDesc, &
2359 ! DomainStart(1),DomainEnd(1), &
2360 ! DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2361 ! CALL wrf_message(mess)
2363 CALL store_patch_in_outbuf_pnc(obuf(icurs/itypesize), &
2364 dummybuf, TRIM(DateStr), &
2367 TRIM(MemoryOrder), &
2370 DomainStart , DomainEnd ,&
2371 MemoryStart , MemoryEnd ,&
2372 PatchStart , PatchEnd, &
2374 stored_write_record = .true.
2376 !!$ IF(VarName .eq. "PSFC")THEN
2377 !!$ CALL dump_real_array_c(obuf(icurs/itypesize), DomainStart,&
2378 !!$ DomainEnd, PatchStart, PatchEnd, &
2379 !!$ mytask_local, DomainDesc)
2383 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
2384 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2385 #ifdef PNETCDF_QUILT
2386 CALL store_patch_in_outbuf_pnc ( dummybuf, &
2387 obuf(icurs/itypesize) , &
2391 TRIM(MemoryOrder) , &
2392 TRIM(Stagger), DimNames, &
2393 DomainStart , DomainEnd , &
2394 MemoryStart , MemoryEnd , &
2395 PatchStart , PatchEnd , &
2397 stored_write_record = .true.
2399 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
2400 ftypesize = LWORDSIZE
2403 icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)* &
2404 (PatchEnd(2)-PatchStart(2)+1)* &
2405 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
2407 ELSE ! Write metadata only (or do 'training'?)
2409 SELECT CASE (use_package(io_form(DataHandle)))
2411 #ifdef PNETCDF_QUILT
2413 CALL ext_pnc_write_field ( handle(DataHandle) , TRIM(DateStr), &
2414 TRIM(VarName) , dummy , FieldType , mpi_comm_local , mpi_comm_local, &
2415 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger), DimNames , &
2416 DomainStart , DomainEnd , &
2417 MemoryStart , MemoryEnd , &
2418 PatchStart , PatchEnd, &
2423 CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
2424 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
2425 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
2426 DomainStart , DomainEnd , &
2427 DomainStart , DomainEnd , &
2428 DomainStart , DomainEnd , &
2432 ! since this is training and the grib output doesn't need training, disable this branch.
2435 CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
2436 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
2437 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
2438 DomainStart , DomainEnd , &
2439 DomainStart , DomainEnd , &
2440 DomainStart , DomainEnd , &
2449 CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2451 icurs = icurs + hdrbufsize
2453 WRITE(mess,*)'quilt: bad tag: ', &
2454 get_hdr_tag( obuf(icurs/itypesize) ),' icurs ',&
2456 CALL wrf_error_fatal( mess )
2460 ! Now, we have finished handling all commands from the latest
2461 ! call to retrieve_pieces_of_field().
2463 IF (stored_write_record) THEN
2464 ! If any field patches have been stored in internal output buffers
2465 ! (via a call to store_patch_in_outbuf_pnc()) then call write_outbuf_pnc()
2466 ! to write them to disk now.
2467 ! NOTE that the I/O server will only have called
2468 ! store_patch_in_outbuf() when handling write_field (int_field)
2469 ! commands which only arrive AFTER an "iosync" command.
2471 #ifdef PNETCDF_QUILT
2472 CALL write_outbuf_pnc( handle(DataHandle), &
2473 use_package(io_form(DataHandle)), &
2474 mpi_comm_local, mytask_local, &
2477 ! CALL end_timing( "quilt_pnc: call to write_outbuf_pnc" )
2478 stored_write_record = .false.
2479 written_record = .true.
2482 ! If one or more "open_for_write_commit" commands were encountered from the
2483 ! latest call to retrieve_pieces_of_field() then call the package-specific
2484 ! routine to do the commit.
2485 IF (okay_to_commit(DataHandle)) THEN
2487 SELECT CASE (use_package(io_form(DataHandle)))
2488 #ifdef PNETCDF_QUILT
2490 CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2491 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2492 CALL ext_pnc_open_for_write_commit(handle(DataHandle),Status)
2493 okay_to_write(DataHandle) = .true.
2498 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2499 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2500 CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
2501 okay_to_write(DataHandle) = .true.
2506 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2507 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2508 CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
2509 okay_to_write(DataHandle) = .true.
2514 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2515 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2516 CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
2517 okay_to_write(DataHandle) = .true.
2522 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2523 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2524 CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
2525 okay_to_write(DataHandle) = .true.
2530 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2531 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2532 CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
2533 okay_to_write(DataHandle) = .true.
2541 okay_to_commit(DataHandle) = .false.
2545 ! Retrieve header and all patches for the next field from the internal
2547 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
2552 ! flush output files if needed
2553 IF (written_record) THEN
2555 SELECT CASE ( use_package(io_form) )
2556 #ifdef PNETCDF_QUILT
2558 CALL ext_pnc_iosync( handle(DataHandle), Status )
2563 written_record = .false.
2564 !CALL end_timing( "quilt_pnc: flush" )
2569 END SUBROUTINE quilt_pnc
2571 ! end of #endif of DM_PARALLEL
2574 SUBROUTINE init_module_wrf_quilt
2575 USE module_wrf_error, only: init_module_wrf_error
2576 USE module_driver_constants
2577 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2578 USE module_dm, only: mpi_comm_allcompute
2581 ! Both client (compute) and server tasks call this routine to initialize the
2582 ! module. Routine setup_quilt_servers() is called from this routine to
2583 ! determine which tasks are compute tasks and which are server tasks. Server
2584 ! tasks then call routine quilt() and remain there for the rest of the model
2585 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
2588 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2592 NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups, poll_servers
2593 INTEGER ntasks, mytask, ierr, io_status
2594 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
2595 INTEGER thread_support_provided, thread_support_requested
2597 INTEGER mpi_comm_here, temp_poll
2599 LOGICAL esmf_coupling
2601 !!!!! needed to sneak-peek the namelist to get parent_id
2602 ! define as temporaries
2603 # include "namelist_defines.inc"
2605 ! Statements that specify the namelists
2606 # include "namelist_statements.inc"
2607 !TODO: Change this to run-time switch
2609 esmf_coupling = .TRUE.
2611 esmf_coupling = .FALSE.
2614 quilting_enabled = .FALSE.
2615 IF ( disable_quilt ) RETURN
2617 DO i = 1,int_num_handles
2618 okay_to_write(i) = .FALSE.
2619 int_handle_in_use(i) = .FALSE.
2620 server_for_handle(i) = 0
2621 int_num_bytes_to_write(i) = 0
2624 CALL MPI_INITIALIZED( mpi_inited, ierr )
2625 IF ( .NOT. mpi_inited ) THEN
2626 CALL WRF_ERROR_FATAL( "module_io_quilt_old.F : MPI not init'd" )
2628 CALL wrf_get_dm_quilt_comm( mpi_comm_here ) ! jm 20151212
2630 CALL MPI_Comm_rank( mpi_comm_here, mytask, ierr ) ;
2631 CALL mpi_x_comm_size( mpi_comm_here, ntasks, ierr ) ;
2633 IF ( mytask .EQ. 0 ) THEN
2634 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
2636 nio_tasks_per_group = 0
2637 poll_servers = .false.
2638 READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
2639 IF (io_status .NE. 0) THEN
2640 CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
2645 READ ( UNIT = 27 , NML = domains , IOSTAT=io_status )
2646 IF (io_status .NE. 0) THEN
2647 CALL wrf_error_fatal( "ERROR reading namelist domains" )
2650 IF ( esmf_coupling ) THEN
2651 IF ( nio_tasks_per_group > 0 ) THEN
2652 CALL wrf_error_fatal("frame/module_io_quilt.F: cannot use "// &
2653 "ESMF coupling with quilt tasks") ;
2656 if(poll_servers) then
2663 CALL mpi_bcast( nio_tasks_per_group , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2664 CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2665 CALL mpi_bcast( temp_poll , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2666 CALL mpi_bcast( nproc_x , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2667 CALL mpi_bcast( nproc_y , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2669 poll_servers = (temp_poll == 1)
2671 CALL setup_quilt_servers( nio_tasks_per_group, &
2677 nio_tasks_in_group, &
2682 call init_module_wrf_error(on_io_server=.true.)
2684 ! provide the communicator for the integration tasks to RSL
2685 IF ( compute_node ) THEN
2686 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2687 mpi_comm_allcompute = mpi_comm_local
2689 CALL wrf_set_dm_communicator( mpi_comm_local )
2690 # if ( DA_CORE != 1 )
2691 IF (coupler_on) CALL cpl_set_dm_communicator( mpi_comm_local )
2694 # if ( DA_CORE != 1 )
2695 IF (coupler_on) CALL cpl_set_dm_communicator( MPI_COMM_NULL )
2697 CALL quilt ! will not return on io server tasks
2701 END SUBROUTINE init_module_wrf_quilt
2704 #ifdef IBM_REDUCE_BUG_WORKAROUND
2706 ! These three subroutines re-implement MPI_Reduce on MPI_INTEGER
2709 ! This is a workaround for a bug in the IBM MPI implementation.
2710 ! Some MPI processes will get stuck in MPI_Reduce and not
2711 ! return until the PREVIOUS I/O server group finishes writing.
2713 ! This workaround replaces the MPI_Reduce call with many
2714 ! MPI_Send and MPI_Recv calls that perform the sum on the
2715 ! root of the communicator.
2717 ! There are two reduce routines: one for a sum of scalars
2718 ! and one for a sum of arrays. The get_reduce_tag generates
2719 ! MPI tags for the communication.
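! Usage sketch for the routines below (argument order as declared in
! reduce_add_int_scl/reduce_add_int_arr):
!
!   INTEGER :: mybytes, totalbytes
!   CALL reduce_add_int_scl( mybytes, totalbytes, 1, root, comm )
!   ! on rank "root", totalbytes now holds the sum over all ranks; each call
!   ! draws a fresh tag from get_reduce_tag(root,comm) so that successive
!   ! reduces on the same communicator do not interfere with one another.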
2721 integer function get_reduce_tag(root,comm)
2724 integer, intent(in) :: comm,root
2725 integer :: i,j, tag, here
2726 integer :: ierr,me,size
2728 integer, pointer :: nexttags(:)
2729 integer, target :: dummy(1)
2730 character(255) :: message
2731 integer(kind=4) :: comm4,hashed
2733 integer, parameter :: hashsize = 113 ! should be prime, >max_servers+1
2734 integer, parameter :: tagloop = 100000 ! number of tags reserved per communicator
2735 integer, parameter :: origin = 1031102 ! lowest tag number we'll use
2736 integer, save :: nexttag=origin ! next tag to use for a new communicator
2737 integer, save :: comms(hashsize)=-1, firsttag(hashsize)=0, curtag(hashsize)=0
2739 ! If integers are not four bytes, this implementation will still
2740 ! work, but it may be inefficient (O(N) lookup instead of O(1)).
2741 ! To fix that, an eight byte hash function would be needed, but
2742 ! integers are four bytes in WRF, so that is not a problem right now.
2746 call int_hash(comm4,hashed)
2747 hashed=mod(abs(hashed),hashsize)+1
2748 if(hashed<0) call wrf_error_fatal('hashed<0')
2751 j=1+mod(i+hashed-1,hashsize)
2753 if(firsttag(j)/=0 .and. comms(j)==comm) then
2754 ! Found the communicator
2755 if(curtag(j)-firsttag(j) >= tagloop) then
2756 ! Hit the max tag number so we need to reset.
2757 ! To make sure >tagloop reduces don't happen
2758 ! before someone finishes an old reduce, we
2759 ! have an MPI_Barrier here.
2760 !call wrf_message('Hit tagloop limit so calling mpi_barrier in get_reduce_tag...')
2761 call mpi_barrier(comm,ierr)
2762 if(ierr/=0) call wrf_error_fatal('cannot call mpi_barrier')
2763 !call wrf_message(' ...back from mpi_barrier in get_reduce_tag.')
2765 curtag(j)=firsttag(j)
2776 ! ==================== HANDLE NEW COMMUNICATORS ====================
2778 !write(message,'("Found a new communicator ",I0," in get_reduce_tag, so making a tag range for it")') comm
2780 ! If we get here, the communicator is new to us, so we need
2781 ! to add it to the hash and give it a new tag.
2783 ! First, figure out where we'll put the tag in the hashtable
2786 j=1+mod(i+hashed-1,hashsize)
2788 if(firsttag(j)==0) then
2793 if(here==-1) call wrf_error_fatal('no room in hashtable; increase hashsize in get_reduce_tag (should be >max_servers+1)')
2795 ! Now, find out the new tag's number. To do this, we need to
2796 ! get the next tag number that is not used by any ranks.
2798 call mpi_comm_rank(comm,me,ierr)
2799 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
2801 call mpi_comm_size(comm,size,ierr)
2802 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
2805 allocate(nexttags(size))
2810 call mpi_gather(nexttag,1,MPI_INTEGER,nexttags,1,MPI_INTEGER,root,comm,ierr)
2811 if(ierr/=0) call wrf_error_fatal('cannot call mpi_gather')
2814 nexttag=max(nexttag,maxval(nexttags))
2815 deallocate(nexttags)
2817 call mpi_bcast(nexttag,1,MPI_INTEGER,root,comm,ierr)
2820 firsttag(here)=nexttag
2821 curtag(here)=nexttag
2822 get_reduce_tag=nexttag
2824 !write(message,'("Stored comm ",I0," with tag ",I0,"=",I0," in hash element ",I0)') &
2825 ! comms(here),firsttag(here),curtag(here),here
2826 !call wrf_message(message)
2828 nexttag=nexttag+tagloop
2830 end function get_reduce_tag
2831 subroutine reduce_add_int_scl(send,recv,count,root,comm)
2834 integer, intent(in) :: count,root,comm
2835 integer, intent(inout) :: recv
2836 integer, intent(in) :: send
2837 integer :: me, size, ierr, you, temp, tag
2838 character*255 :: message
2839 if(root<0) call wrf_error_fatal('root is less than 0')
2841 tag=get_reduce_tag(root,comm)
2843 !write(message,'("Send/recv to tag ",I0)') tag
2844 !call wrf_message(message)
2846 call mpi_comm_rank(comm,me,ierr)
2847 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
2849 call mpi_comm_size(comm,size,ierr)
2850 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
2852 if(root>=size) call wrf_error_fatal('root is beyond highest communicator rank')
2857 call mpi_recv(temp,1,MPI_INTEGER,MPI_ANY_SOURCE,tag,comm,MPI_STATUS_IGNORE,ierr)
2858 if(ierr/=0) call wrf_error_fatal('error calling mpi_recv')
2862 call mpi_send(send,1,MPI_INTEGER,root,tag,comm,ierr)
2863 if(ierr/=0) call wrf_error_fatal('error calling mpi_send')
2865 end subroutine reduce_add_int_scl
2866 subroutine reduce_add_int_arr(sendbuf,recvbuf,count,root,comm)
2869 integer, intent(in) :: count,root,comm
2870 integer, intent(in) :: sendbuf(count)
2871 integer, intent(inout) :: recvbuf(count)
2872 integer :: me, size, ierr, you, tempbuf(count), tag
2873 character*255 :: message
2875 if(root<0) call wrf_error_fatal('root is less than 0')
2877 tag=get_reduce_tag(root,comm)
2879 !write(message,'("Send/recv to tag ",I0)') tag
2880 !call wrf_message(message)
2882 call mpi_comm_rank(comm,me,ierr)
2883 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_rank')
2885 call mpi_comm_size(comm,size,ierr)
2886 if(ierr/=0) call wrf_error_fatal('cannot call mpi_comm_size')
2888 if(root>=size) call wrf_error_fatal('root is beyond highest communicator rank')
2893 call mpi_recv(tempbuf,count,MPI_INTEGER,MPI_ANY_SOURCE,tag,comm,MPI_STATUS_IGNORE,ierr)
2894 if(ierr/=0) call wrf_error_fatal('error calling mpi_recv')
2895 recvbuf=recvbuf+tempbuf
2898 call mpi_send(sendbuf,count,MPI_INTEGER,root,tag,comm,ierr)
2899 if(ierr/=0) call wrf_error_fatal('error calling mpi_send')
2901 end subroutine reduce_add_int_arr
2905 END MODULE module_wrf_quilt
2908 ! Remaining routines in this file are defined outside of the module
2909 ! either to defeat arg/param type checking or to avoid an explicit use association.
2913 SUBROUTINE disable_quilting
2915 ! Call this in programs that you never want to be quilting (e.g. real)
2916 ! Must call before call to init_module_wrf_quilt().
2918 USE module_wrf_quilt
2919 disable_quilt = .TRUE.
2921 END SUBROUTINE disable_quilting
2923 SUBROUTINE quilting_disabled( reslt )
2925 ! Query whether quilting has been disabled (e.g. by a call to disable_quilting()).
2926 ! Must be called before the call to init_module_wrf_quilt().
2928 USE module_wrf_quilt
2929 LOGICAL, INTENT(OUT) :: reslt
2930 reslt = disable_quilt
2931 write(0,*)__FILE__,__LINE__,disable_quilt
2933 END SUBROUTINE quilting_disabled
2935 LOGICAL FUNCTION use_output_servers_for(ioform)
2937 ! Returns .TRUE. if I/O quilt servers are in-use for write operations
2938 ! AND the output servers can handle the given I/O form. If the I/O
2939 ! form is 0, then the io form is not considered and the result is the
2940 ! same as calling use_output_servers.
2941 ! This routine is called only by client (compute) tasks.
2943 USE module_wrf_quilt
2944 integer, intent(in) :: ioform
2945 use_output_servers_for = quilting_enabled
2946 use_output_servers_for = ( use_output_servers_for .and. ioform<100 )
2948 END FUNCTION use_output_servers_for
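! Usage sketch (hypothetical caller; the branch bodies are only illustrative):
!
!   IF ( use_output_servers_for( io_form ) ) THEN
!     ! route the write request through the I/O quilt servers
!   ELSE
!     ! write directly from the compute tasks via the external I/O package
!   ENDIF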
2950 LOGICAL FUNCTION use_output_servers()
2952 ! Returns .TRUE. if I/O quilt servers are in-use for write operations.
2953 ! This routine is called only by client (compute) tasks.
2955 USE module_wrf_quilt
2956 use_output_servers = quilting_enabled
2958 END FUNCTION use_output_servers
2960 LOGICAL FUNCTION use_input_servers()
2962 ! Returns .TRUE. if I/O quilt servers are in-use for read operations.
2963 ! This routine is called only by client (compute) tasks.
2965 USE module_wrf_quilt
2966 use_input_servers = .FALSE.
2968 END FUNCTION use_input_servers
2970 SUBROUTINE wrf_quilt_open_for_write_begin( FileName , gridid, Comm_compute, Comm_io, SysDepInfo, &
2971 DataHandle , io_form_arg, Status )
2973 ! Instruct the I/O quilt servers to begin data definition ("training") phase
2974 ! for writing to WRF dataset FileName. io_form_arg indicates file format.
2975 ! This routine is called only by client (compute) tasks.
2977 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2978 USE module_wrf_quilt
2979 USE module_state_description, ONLY: IO_PNETCDF
2982 #include "intio_tags.h"
2983 CHARACTER *(*), INTENT(IN) :: FileName
2984 INTEGER , INTENT(IN) :: gridid
2985 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
2986 CHARACTER *(*), INTENT(IN) :: SysDepInfo
2987 INTEGER , INTENT(OUT) :: DataHandle
2988 INTEGER , INTENT(IN) :: io_form_arg
2989 INTEGER , INTENT(OUT) :: Status
2991 CHARACTER*132 :: locFileName, locSysDepInfo
2992 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2994 INTEGER, EXTERNAL :: use_package
2996 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
2997 CALL int_get_fresh_handle(i)
2998 okay_to_write(i) = .false.
3001 locFileName = FileName
3002 locSysDepInfo = SysDepInfo
3004 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3006 SELECT CASE(use_package(io_form_arg))
3008 #ifdef PNETCDF_QUILT
3010 IF(compute_group_master(1)) THEN
3011 CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
3012 locFileName,locSysDepInfo,io_form_arg,&
3015 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3020 IF ( wrf_dm_on_monitor() ) THEN
3021 CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
3022 locFileName,locSysDepInfo,io_form_arg,DataHandle )
3024 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3029 iserver = get_server_id ( DataHandle )
3030 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3032 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3034 !!JMTIMING CALL start_timing
3035 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3037 reduced(1) = hdrbufsize
3038 #ifdef PNETCDF_QUILT
3039 IF ( compute_group_master(1) ) reduced(2) = i
3041 IF ( wrf_dm_on_monitor() ) reduced(2) = i
3043 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3044 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
3046 ! send data to the i/o processor
3047 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3049 hdrbuf, hdrbufsize , &
3057 END SUBROUTINE wrf_quilt_open_for_write_begin
3059 SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
3061 ! Instruct the I/O quilt servers to switch an internal flag to enable output
3062 ! for the dataset referenced by DataHandle. The call to
3063 ! wrf_quilt_open_for_write_commit() must be paired with a call to
3064 ! wrf_quilt_open_for_write_begin().
3065 ! This routine is called only by client (compute) tasks.
3067 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3068 USE module_wrf_quilt
3071 #include "intio_tags.h"
3072 INTEGER , INTENT(IN ) :: DataHandle
3073 INTEGER , INTENT(OUT) :: Status
3074 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
3077 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
3078 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3079 IF ( int_handle_in_use( DataHandle ) ) THEN
3080 okay_to_write( DataHandle ) = .true.
3084 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3086 #ifdef PNETCDF_QUILT
3087 !ARP Only want one command to be received by each IO server when using
3089 IF(compute_group_master(1)) THEN
3090 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3091 DataHandle, int_open_for_write_commit )
3093 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3097 IF ( wrf_dm_on_monitor() ) THEN
3098 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3099 DataHandle, int_open_for_write_commit )
3101 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3105 iserver = get_server_id ( DataHandle )
3106 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3108 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3110 !!JMTIMING CALL start_timing
3111 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3113 reduced(1) = hdrbufsize
3114 #ifdef PNETCDF_QUILT
3115 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3117 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3119 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3120 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
3122 ! send data to the i/o processor
3123 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3125 hdrbuf, hdrbufsize , &
3132 END SUBROUTINE wrf_quilt_open_for_write_commit
3134 SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
3135 DataHandle , Status )
3137 ! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
3138 ! This routine is called only by client (compute) tasks.
3139 ! This is not yet supported.
3141 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3143 CHARACTER *(*), INTENT(IN) :: FileName
3144 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
3145 CHARACTER *(*), INTENT(IN) :: SysDepInfo
3146 INTEGER , INTENT(OUT) :: DataHandle
3147 INTEGER , INTENT(OUT) :: Status
3149 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
3152 CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
3155 END SUBROUTINE wrf_quilt_open_for_read
3157 SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
3159 ! Inquire if the dataset referenced by DataHandle is open.
3160 ! Does not require communication with I/O servers.
3161 ! This routine is called only by client (compute) tasks.
3163 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3164 USE module_wrf_quilt
3166 #include "wrf_io_flags.h"
3167 INTEGER , INTENT(IN) :: DataHandle
3168 CHARACTER *(*), INTENT(IN) :: FileName
3169 INTEGER , INTENT(OUT) :: FileStatus
3170 INTEGER , INTENT(OUT) :: Status
3174 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
3175 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3176 IF ( int_handle_in_use( DataHandle ) ) THEN
3177 IF ( okay_to_write( DataHandle ) ) THEN
3178 FileStatus = WRF_FILE_OPENED_FOR_WRITE
3186 END SUBROUTINE wrf_quilt_inquire_opened
3188 SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
3190 ! Return the Filename and FileStatus associated with DataHandle.
3191 ! Does not require communication with I/O servers.
3193 ! Note that the current implementation does not actually return FileName.
3194 ! Currently, WRF does not use this returned value. Fixing this would simply
3195 ! require saving the file names on the client tasks in an array similar to
3197 ! This routine is called only by client (compute) tasks.
3199 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3200 USE module_wrf_quilt
3202 #include "wrf_io_flags.h"
3203 INTEGER , INTENT(IN) :: DataHandle
3204 CHARACTER *(*), INTENT(OUT) :: FileName
3205 INTEGER , INTENT(OUT) :: FileStatus
3206 INTEGER , INTENT(OUT) :: Status
3207 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
3209 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3210 IF ( int_handle_in_use( DataHandle ) ) THEN
3211 IF ( okay_to_write( DataHandle ) ) THEN
3212 FileStatus = WRF_FILE_OPENED_FOR_WRITE
3214 FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
3217 FileStatus = WRF_FILE_NOT_OPENED
3220 FileName = "bogusfornow"
3226 END SUBROUTINE wrf_quilt_inquire_filename
3228 SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
3230 ! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
3231 ! with memory buffers.
3233 ! After the "iosync" header (request) is sent to the I/O quilt server,
3234 ! the compute tasks will then send the entire contents (headers and data) of
3235 ! int_local_output_buffer to their I/O quilt server. This communication is
3236 ! done in subroutine send_to_io_quilt_servers(). After the I/O quilt servers
3237 ! receive this data, they will write all accumulated fields to disk.
3239 ! Significant time may be required for the I/O quilt servers to organize
3240 ! fields and write them to disk. Therefore, the "iosync" request should be
3241 ! sent only when the compute tasks are ready to run for a while without
3242 ! needing to communicate with the servers. Otherwise, the compute tasks
3243 ! will end up waiting for the servers to finish writing to disk, thus wasting
3244 ! any performance benefits of having servers at all.
3246 ! This routine is called only by client (compute) tasks.
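! Typical client-side ordering, sketched from the descriptions in this file
! (error handling and metadata calls omitted):
!
!   CALL wrf_quilt_open_for_write_begin( FileName, gridid, Comm_compute, Comm_io, &
!                                        SysDepInfo, DataHandle, io_form_arg, Status )
!   ! ... buffer headers and field data for this DataHandle ...
!   CALL wrf_quilt_open_for_write_commit( DataHandle, Status )
!   CALL wrf_quilt_iosync( DataHandle, Status )   ! ship int_local_output_buffer to the servers
!   CALL wrf_quilt_ioclose( DataHandle, Status )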
3248 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3249 USE module_wrf_quilt
3252 INTEGER , INTENT(IN) :: DataHandle
3253 INTEGER , INTENT(OUT) :: Status
3255 INTEGER locsize , itypesize
3256 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
3258 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
3261 IF ( associated ( int_local_output_buffer ) ) THEN
3263 iserver = get_server_id ( DataHandle )
3264 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3266 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3268 locsize = int_num_bytes_to_write(DataHandle)
3271 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3273 reduced(1) = locsize
3274 #ifdef PNETCDF_QUILT
3275 ! ARP Only want one command per IOServer if doing parallel IO
3276 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3278 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3280 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3281 ! CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
3283 ! send data to the i/o processor
3285 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3287 int_local_output_buffer(1), locsize , &
3290 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3292 int_local_output_buffer, locsize , &
3297 int_local_output_cursor = 1
3298 ! int_num_bytes_to_write(DataHandle) = 0
3299 DEALLOCATE ( int_local_output_buffer )
3300 NULLIFY ( int_local_output_buffer )
3302 CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
3304 ! CALL end_timing("wrf_quilt_iosync")
3308 END SUBROUTINE wrf_quilt_iosync
3310 SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
3312 ! Instruct the I/O quilt servers to close the dataset referenced by DataHandle.
3314 ! This routine also clears the client file handle and, if needed, deallocates
3315 ! int_local_output_buffer.
3316 ! This routine is called only by client (compute) tasks.
3318 #if defined( DM_PARALLEL ) && ! defined( STUBMPI)
3319 USE module_wrf_quilt
3323 #include "intio_tags.h"
3324 INTEGER , INTENT(IN) :: DataHandle
3325 INTEGER , INTENT(OUT) :: Status
3326 INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
3329 !!JMTIMING CALL start_timing
3330 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
3331 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3333 ! If we're using pnetcdf then each IO server will need to receive the
3334 ! handle just once as there is
3335 ! no longer a reduce over the IO servers to get it.
3336 #ifdef PNETCDF_QUILT
3337 IF ( compute_group_master(1) )THEN
3338 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3339 DataHandle, int_ioclose )
3341 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3344 IF ( wrf_dm_on_monitor() ) THEN
3345 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3346 DataHandle , int_ioclose )
3348 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3352 iserver = get_server_id ( DataHandle )
3353 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3355 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3357 !!JMTIMING CALL start_timing
3358 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3360 #ifdef PNETCDF_QUILT
3361 ! If we're using pnetcdf then each IO server will need the handle as there is
3362 ! no longer a reduce over the IO servers to get it.
3363 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3365 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3367 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3368 !!JMTIMING CALL end_timing("MPI_Reduce in ioclose")
3371 ! send data to the i/o processor
3372 !!JMTIMING CALL start_timing
3373 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3375 hdrbuf, hdrbufsize , &
3377 !!JMTIMING CALL end_timing("collect_on_comm in io_close")
3380 int_handle_in_use(DataHandle) = .false.
3381 CALL set_server_id( DataHandle, 0 )
3382 okay_to_write(DataHandle) = .false.
3383 okay_to_commit(DataHandle) = .false.
3384 int_local_output_cursor = 1
3385 int_num_bytes_to_write(DataHandle) = 0
3386 IF ( associated ( int_local_output_buffer ) ) THEN
3387 DEALLOCATE ( int_local_output_buffer )
3388 NULLIFY ( int_local_output_buffer )
3392 !!JMTIMING CALL end_timing( "wrf_quilt_ioclose" )
3396 END SUBROUTINE wrf_quilt_ioclose
3398 SUBROUTINE wrf_quilt_ioexit( Status )
3400 ! Instruct the I/O quilt servers to shut down the WRF I/O system.
3401 ! Do not call any wrf_quilt_*() routines after this routine has been called.
3402 ! This routine is called only by client (compute) tasks.
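! What reaches each server is ultimately just a negative buffer size at the
! MPI_Reduce in its request loop (quilt_pnc above treats -100 as the magic
! shutdown value); the client-side essence is:
!
!   reduced = 0
!   IF ( me .EQ. 0 ) reduced(1) = -100            ! negative size => server exits
!   CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, &
!                      tasks_in_group-1, comm_io_group, ierr )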
3404 #if defined( DM_PARALLEL ) && ! defined (STUBMPI )
3405 USE module_wrf_quilt
3408 #include "intio_tags.h"
3409 INTEGER , INTENT(OUT) :: Status
3410 INTEGER :: DataHandle, actual_iserver
3411 INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
3414 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
3415 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3417 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3418 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3419 #ifdef PNETCDF_QUILT
3420 !ARP Send the ioexit message just once to each IOServer when using parallel IO
3421 IF( compute_group_master(1) ) THEN
3422 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3423 DataHandle, int_ioexit )
3425 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3429 IF ( wrf_dm_on_monitor() ) THEN
3430 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3431 DataHandle , int_ioexit ) ! Handle is dummy
3433 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3437 DO iserver = 1, nio_groups
3438 if(poll_servers) then
3439 ! We're using server polling mode, so we must call
3440 ! *_find_server to receive the mpi_ssend sent by the servers,
3441 ! otherwise WRF will hang at the mpi_x_reduce below.
3443 call wrf_quilt_find_server(actual_iserver)
3445 ! The actual_iserver is now set to the next available I/O server.
3446 ! That may not be the same as iserver, but that's okay as long
3447 ! as we run through this loop exactly nio_groups times.
3449 ! Not using server polling, so just access servers in numeric order.
3450 actual_iserver=iserver
3452 CALL get_mpi_comm_io_groups( comm_io_group , actual_iserver )
3454 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3455 CALL mpi_comm_rank( comm_io_group , me , ierr )
3457 ! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
3460 IF ( me .eq. 0 ) reduced(1) = hdrbufsize
3461 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3468 END SUBROUTINE wrf_quilt_ioexit
3470 SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
3472 ! Instruct the I/O quilt servers to return the next time stamp.
3473 ! This is not yet supported.
3474 ! This routine is called only by client (compute) tasks.
3476 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3478 INTEGER , INTENT(IN) :: DataHandle
3479 CHARACTER*(*) :: DateStr
3483 END SUBROUTINE wrf_quilt_get_next_time
3485 SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
3487 ! Instruct the I/O quilt servers to return the previous time stamp.
3488 ! This is not yet supported.
3489 ! This routine is called only by client (compute) tasks.
3491 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3493 INTEGER , INTENT(IN) :: DataHandle
3494 CHARACTER*(*) :: DateStr
3498 END SUBROUTINE wrf_quilt_get_previous_time
3500 SUBROUTINE wrf_quilt_set_time ( DataHandle, Data, Status )
3502 ! Instruct the I/O quilt servers to set the time stamp in the dataset
3503 ! referenced by DataHandle.
3504 ! This routine is called only by client (compute) tasks.
3506 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3507 USE module_wrf_quilt
3508 USE module_state_description, ONLY: IO_PNETCDF
3511 #include "intio_tags.h"
3512 INTEGER , INTENT(IN) :: DataHandle
3513 CHARACTER*(*) , INTENT(IN) :: Data
3515 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
3518 INTEGER, EXTERNAL :: use_package
3520 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
3522 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3523 IF ( int_handle_in_use( DataHandle ) ) THEN
3524 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3525 Count = 0 ! there is no count for character strings
3527 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3528 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3529 #ifdef PNETCDF_QUILT
3530 IF(compute_group_master(1) )THEN
3531 ! We only want to send one time header to each I/O server, but the servers
3532 ! cannot tell on their own which headers are duplicates, so only the
3533 ! compute_group_master process generates the real header.
3534 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3535 DataHandle, "TIMESTAMP", "", Data, int_set_time )
3537 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3540 IF ( wrf_dm_on_monitor() ) THEN
3541 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3542 DataHandle, "TIMESTAMP", "", Data, int_set_time )
3544 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3548 iserver = get_server_id ( DataHandle )
3549 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3550 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3552 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3554 reduced(1) = hdrbufsize
3555 #ifdef PNETCDF_QUILT
3556 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3558 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3560 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3561 ! send data to the i/o processor
3562 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3564 hdrbuf, hdrbufsize , &
3571 END SUBROUTINE wrf_quilt_set_time
3573 SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
3575 ! When reading, instruct the I/O quilt servers to return the name of the next
3576 ! variable in the current time frame.
3577 ! This is not yet supported.
3578 ! This routine is called only by client (compute) tasks.
3580 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3582 INTEGER , INTENT(IN) :: DataHandle
3583 CHARACTER*(*) :: VarName
3587 END SUBROUTINE wrf_quilt_get_next_var
3589 SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element, Data, Count, Outcount, Status )
3591 ! Instruct the I/O quilt servers to attempt to read Count words of time
3592 ! independent domain metadata named "Element"
3593 ! from the open dataset described by DataHandle.
3594 ! Metadata of type real are
3595 ! stored in array Data.
3596 ! Actual number of words read is returned in OutCount.
3597 ! This routine is called only by client (compute) tasks.
3599 ! This is not yet supported.
3601 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3603 INTEGER , INTENT(IN) :: DataHandle
3604 CHARACTER*(*) , INTENT(IN) :: Element
3605 REAL, INTENT(IN) :: Data(*)
3606 INTEGER , INTENT(IN) :: Count
3609 CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
3612 END SUBROUTINE wrf_quilt_get_dom_ti_real
3614 SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element, Data, Count, Status )
3616 ! Instruct the I/O quilt servers to write Count words of time independent
3617 ! domain metadata named "Element"
3618 ! to the open dataset described by DataHandle.
3619 ! Metadata of type real are
3620 ! copied from array Data.
3621 ! This routine is called only by client (compute) tasks.
3623 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3624 USE module_wrf_quilt
3627 #include "intio_tags.h"
3628 INTEGER , INTENT(IN) :: DataHandle
3629 CHARACTER*(*) , INTENT(IN) :: Element
3630 REAL , INTENT(IN) :: Data(*)
3631 INTEGER , INTENT(IN) :: Count
3634 CHARACTER*132 :: locElement
3635 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
3638 !!JMTIMING CALL start_timing
3639 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
3640 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3641 locElement = Element
3643 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3644 IF ( int_handle_in_use( DataHandle ) ) THEN
3645 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3646 CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
3648 #ifdef PNETCDF_QUILT
3649 IF ( compute_group_master(1) ) THEN
3650 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3651 DataHandle, locElement, Data, Count, int_dom_ti_real )
3653 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3656 IF ( wrf_dm_on_monitor() ) THEN
3657 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3658 DataHandle, locElement, Data, Count, int_dom_ti_real )
3660 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3664 iserver = get_server_id ( DataHandle )
3665 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3666 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3668 !!JMTIMING CALL start_timing
3669 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3671 reduced(1) = hdrbufsize
3672 #ifdef PNETCDF_QUILT
3673 IF( compute_group_master(1) ) reduced(2) = DataHandle
3675 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3677 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3678 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
3679 ! send data to the i/o processor
3680 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3682 hdrbuf, hdrbufsize , &
3688 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_real")
3691 END SUBROUTINE wrf_quilt_put_dom_ti_real
3693 SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element, Data, Count, Outcount, Status )
3695 ! Instruct the I/O quilt servers to attempt to read Count words of time
3696 ! independent domain metadata named "Element"
3697 ! from the open dataset described by DataHandle.
3698 ! Metadata of type double are
3699 ! stored in array Data.
3700 ! Actual number of words read is returned in OutCount.
3701 ! This routine is called only by client (compute) tasks.
3703 ! This is not yet supported.
3705 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3707 INTEGER , INTENT(IN) :: DataHandle
3708 CHARACTER*(*) , INTENT(IN) :: Element
3710 INTEGER , INTENT(IN) :: Count
3713 CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
3716 END SUBROUTINE wrf_quilt_get_dom_ti_double
3718 SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element, Data, Count, Status )
3720 ! Instruct the I/O quilt servers to write Count words of time independent
3721 ! domain metadata named "Element"
3722 ! to the open dataset described by DataHandle.
3723 ! Metadata of type double are
3724 ! copied from array Data.
3725 ! This routine is called only by client (compute) tasks.
3727 ! This is not yet supported.
3729 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3731 INTEGER , INTENT(IN) :: DataHandle
3732 CHARACTER*(*) , INTENT(IN) :: Element
3733 REAL*8 , INTENT(IN) :: Data(*)
3734 INTEGER , INTENT(IN) :: Count
3736 CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
3739 END SUBROUTINE wrf_quilt_put_dom_ti_double
3741 SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element, Data, Count, Outcount, Status )
3743 ! Instruct the I/O quilt servers to attempt to read Count words of time
3744 ! independent domain metadata named "Element"
3745 ! from the open dataset described by DataHandle.
3746 ! Metadata of type integer are
3747 ! stored in array Data.
3748 ! Actual number of words read is returned in OutCount.
3749 ! This routine is called only by client (compute) tasks.
3751 ! This is not yet supported.
3753 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3755 INTEGER , INTENT(IN) :: DataHandle
3756 CHARACTER*(*) , INTENT(IN) :: Element
3758 INTEGER , INTENT(IN) :: Count
3761 CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
3764 END SUBROUTINE wrf_quilt_get_dom_ti_integer
3766 SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element, Data, Count, Status )
3768 ! Instruct the I/O quilt servers to write Count words of time independent
3769 ! domain metadata named "Element"
3770 ! to the open dataset described by DataHandle.
3771 ! Metadata of type integer are
3772 ! copied from array Data.
3773 ! This routine is called only by client (compute) tasks.
3775 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3776 USE module_wrf_quilt
3777 USE module_state_description, ONLY: IO_PNETCDF
3780 #include "intio_tags.h"
3781 INTEGER , INTENT(IN) :: DataHandle
3782 CHARACTER*(*) , INTENT(IN) :: Element
3783 INTEGER , INTENT(IN) :: Data(*)
3784 INTEGER , INTENT(IN) :: Count
3787 CHARACTER*132 :: locElement
3788 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
3790 INTEGER, EXTERNAL :: use_package
3793 !!JMTIMING CALL start_timing
3794 locElement = Element
3796 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
3798 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3799 IF ( int_handle_in_use( DataHandle ) ) THEN
3800 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3801 CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
3803 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3804 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3805 #ifdef PNETCDF_QUILT
3806 IF ( compute_group_master(1) )THEN
3807 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3808 DataHandle, locElement, Data, Count, &
3809 int_dom_ti_integer )
3811 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3814 IF ( wrf_dm_on_monitor() ) THEN
3815 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3816 DataHandle, locElement, Data, Count, &
3817 int_dom_ti_integer )
3819 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3823 iserver = get_server_id ( DataHandle )
3824 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3825 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3827 !!JMTIMING CALL start_timing
3828 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3830 reduced(1) = hdrbufsize
3831 #ifdef PNETCDF_QUILT
3832 IF ( compute_group_master(1) ) reduced(2) = DataHandle
3834 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3836 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
3838 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
3839 ! send data to the i/o processor
3840 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3842 hdrbuf, hdrbufsize , &
3846 CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
3847 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_integer" )
3851 END SUBROUTINE wrf_quilt_put_dom_ti_integer
3853 SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element, Data, Count, Outcount, Status )
3855 ! Instruct the I/O quilt servers to attempt to read Count words of time
3856 ! independent domain metadata named "Element"
3857 ! from the open dataset described by DataHandle.
3858 ! Metadata of type logical are
3859 ! stored in array Data.
3860 ! Actual number of words read is returned in OutCount.
3861 ! This routine is called only by client (compute) tasks.
3863 ! This is not yet supported.
3865 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3867 INTEGER , INTENT(IN) :: DataHandle
3868 CHARACTER*(*) , INTENT(IN) :: Element
3870 INTEGER , INTENT(IN) :: Count
3873 ! CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
3876 END SUBROUTINE wrf_quilt_get_dom_ti_logical
3878 SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element, Data, Count, Status )
3880 ! Instruct the I/O quilt servers to write Count words of time independent
3881 ! domain metadata named "Element"
3882 ! to the open dataset described by DataHandle.
3883 ! Metadata of type logical are
3884 ! copied from array Data.
3885 ! This routine is called only by client (compute) tasks.
3887 ! This is not yet supported.
3889 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3891 INTEGER , INTENT(IN) :: DataHandle
3892 CHARACTER*(*) , INTENT(IN) :: Element
3893 logical , INTENT(IN) :: Data(*)
3894 INTEGER , INTENT(IN) :: Count
3898 INTEGER one_or_zero(Count)
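      ! The original conversion loop is elided here; a minimal sketch of the
      ! logical-to-integer translation this routine relies on before delegating
      ! to wrf_quilt_put_dom_ti_integer (an assumption for illustration, not
      ! verbatim WRF code):
      one_or_zero = MERGE( 1, 0, Data(1:Count) )  ! .TRUE. -> 1, .FALSE. -> 0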
3908 CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element, one_or_zero, Count, Status )
3911 END SUBROUTINE wrf_quilt_put_dom_ti_logical
3913 SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element, Data, Status )
3915 ! Instruct the I/O quilt servers to attempt to read time independent
3916 ! domain metadata named "Element"
3917 ! from the open dataset described by DataHandle.
3918 ! Metadata of type char are
3919 ! stored in string Data.
3920 ! This routine is called only by client (compute) tasks.
3922 ! This is not yet supported.
3924 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3926 INTEGER , INTENT(IN) :: DataHandle
3927 CHARACTER*(*) , INTENT(IN) :: Element
3928 CHARACTER*(*) :: Data
3930 CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
3933 END SUBROUTINE wrf_quilt_get_dom_ti_char
3935 SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element, Data, Status )
3937 ! Instruct the I/O quilt servers to write time independent
3938 ! domain metadata named "Element"
3939 ! to the open dataset described by DataHandle.
3940 ! Metadata of type char are
3941 ! copied from string Data.
3942 ! This routine is called only by client (compute) tasks.
3944 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3945 USE module_wrf_quilt
3948 #include "intio_tags.h"
3949 INTEGER , INTENT(IN) :: DataHandle
3950 CHARACTER*(*) , INTENT(IN) :: Element
3951 CHARACTER*(*) , INTENT(IN) :: Data
3953 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me
3956 !!JMTIMING CALL start_timing
3957 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
3959 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3960 IF ( int_handle_in_use( DataHandle ) ) THEN
3961 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3963 !ARPDBG - potential bug. Have no access to what type of IO is being used for
3964 ! this data so if PNETCDF_QUILT is defined then we assume that's what's being used.
3965 #ifdef PNETCDF_QUILT
3966 IF(compute_group_master(1))THEN
3967 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3968 DataHandle, Element, "", Data, &
3971 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3974 IF ( wrf_dm_on_monitor() ) THEN
3975 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3976 DataHandle, Element, "", Data, int_dom_ti_char )
3978 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3982 iserver = get_server_id ( DataHandle )
3983 ! write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
3984 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3985 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3986 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
3987 !!JMTIMING! CALL start_timing
3988 !write(0,*)'calling MPI_Barrier'
3989 ! CALL MPI_Barrier( mpi_comm_local, ierr )
3990 !write(0,*)'back from MPI_Barrier'
3991 !!JMTIMING! CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
3993 !!JMTIMING CALL start_timing
3994 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3997 reduced(1) = hdrbufsize
3998 #ifdef PNETCDF_QUILT
3999 IF(compute_group_master(1)) reduced(2) = DataHandle
4001 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
4003 !call mpi_comm_rank( comm_io_group , me, ierr )
4005 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
4007 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
4008 ! send data to the i/o processor
4009 !!JMTIMING CALL start_timing
4011 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
4013 hdrbuf, hdrbufsize , &
4015 !!JMTIMING CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
4018 !!JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_char")
4022 END SUBROUTINE wrf_quilt_put_dom_ti_char
4024 SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
4026 ! Instruct the I/O quilt servers to attempt to read Count words of time
4027 ! dependent domain metadata named "Element" valid at time DateStr
4028 ! from the open dataset described by DataHandle.
4029 ! Metadata of type real are
4030 ! stored in array Data.
4031 ! Actual number of words read is returned in OutCount.
4032 ! This routine is called only by client (compute) tasks.
4034 ! This is not yet supported.
4036 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4038 INTEGER , INTENT(IN) :: DataHandle
4039 CHARACTER*(*) , INTENT(IN) :: Element
4040 CHARACTER*(*) , INTENT(IN) :: DateStr
4042 INTEGER , INTENT(IN) :: Count
4047 END SUBROUTINE wrf_quilt_get_dom_td_real
4049 SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Status )
4051 ! Instruct the I/O quilt servers to write Count words of time dependent
4052 ! domain metadata named "Element" valid at time DateStr
4053 ! to the open dataset described by DataHandle.
4054 ! Metadata of type real are
4055 ! copied from array Data.
4056 ! This routine is called only by client (compute) tasks.
4058 ! This is not yet supported.
4060 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4062 INTEGER , INTENT(IN) :: DataHandle
4063 CHARACTER*(*) , INTENT(IN) :: Element
4064 CHARACTER*(*) , INTENT(IN) :: DateStr
4065 real , INTENT(IN) :: Data(*)
4066 INTEGER , INTENT(IN) :: Count
4070 END SUBROUTINE wrf_quilt_put_dom_td_real
4072 SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
4074 ! Instruct the I/O quilt servers to attempt to read Count words of time
4075 ! dependent domain metadata named "Element" valid at time DateStr
4076 ! from the open dataset described by DataHandle.
4077 ! Metadata of type double are
4078 ! stored in array Data.
4079 ! Actual number of words read is returned in OutCount.
4080 ! This routine is called only by client (compute) tasks.
4082 ! This is not yet supported.
4084 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4086 INTEGER , INTENT(IN) :: DataHandle
4087 CHARACTER*(*) , INTENT(IN) :: Element
4088 CHARACTER*(*) , INTENT(IN) :: DateStr
4090 INTEGER , INTENT(IN) :: Count
4094 CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
4096 END SUBROUTINE wrf_quilt_get_dom_td_double
4098 SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Status )
4100 ! Instruct the I/O quilt servers to write Count words of time dependent
4101 ! domain metadata named "Element" valid at time DateStr
4102 ! to the open dataset described by DataHandle.
4103 ! Metadata of type double are
4104 ! copied from array Data.
4105 ! This routine is called only by client (compute) tasks.
4107 ! This is not yet supported.
4109 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4111 INTEGER , INTENT(IN) :: DataHandle
4112 CHARACTER*(*) , INTENT(IN) :: Element
4113 CHARACTER*(*) , INTENT(IN) :: DateStr
4114 real*8 , INTENT(IN) :: Data(*)
4115 INTEGER , INTENT(IN) :: Count
4118 CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
4120 END SUBROUTINE wrf_quilt_put_dom_td_double
4122 SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
4124 ! Instruct the I/O quilt servers to attempt to read Count words of time
4125 ! dependent domain metadata named "Element" valid at time DateStr
4126 ! from the open dataset described by DataHandle.
4127 ! Metadata of type integer are
4128 ! stored in array Data.
4129 ! Actual number of words read is returned in OutCount.
4130 ! This routine is called only by client (compute) tasks.
4132 ! This is not yet supported.
4134 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4136 INTEGER , INTENT(IN) :: DataHandle
4137 CHARACTER*(*) , INTENT(IN) :: Element
4138 CHARACTER*(*) , INTENT(IN) :: DateStr
4140 INTEGER , INTENT(IN) :: Count
4145 END SUBROUTINE wrf_quilt_get_dom_td_integer
4147 SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Status )
4149 ! Instruct the I/O quilt servers to write Count words of time dependent
4150 ! domain metadata named "Element" valid at time DateStr
4151 ! to the open dataset described by DataHandle.
4152 ! Metadata of type integer are
4153 ! copied from array Data.
4154 ! This routine is called only by client (compute) tasks.
4156 ! This is not yet supported.
4158 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4160 INTEGER , INTENT(IN) :: DataHandle
4161 CHARACTER*(*) , INTENT(IN) :: Element
4162 CHARACTER*(*) , INTENT(IN) :: DateStr
4163 integer , INTENT(IN) :: Data(*)
4164 INTEGER , INTENT(IN) :: Count
4168 END SUBROUTINE wrf_quilt_put_dom_td_integer
4170 SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
4172 ! Instruct the I/O quilt servers to attempt to read Count words of time
4173 ! dependent domain metadata named "Element" valid at time DateStr
4174 ! from the open dataset described by DataHandle.
4175 ! Metadata of type logical are
4176 ! stored in array Data.
4177 ! Actual number of words read is returned in OutCount.
4178 ! This routine is called only by client (compute) tasks.
4180 ! This is not yet supported.
4182 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4184 INTEGER , INTENT(IN) :: DataHandle
4185 CHARACTER*(*) , INTENT(IN) :: Element
4186 CHARACTER*(*) , INTENT(IN) :: DateStr
4188 INTEGER , INTENT(IN) :: Count
4193 END SUBROUTINE wrf_quilt_get_dom_td_logical
4195 SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Status )
4197 ! Instruct the I/O quilt servers to write Count words of time dependent
4198 ! domain metadata named "Element" valid at time DateStr
4199 ! to the open dataset described by DataHandle.
4200 ! Metadata of type logical are
4201 ! copied from array Data.
4202 ! This routine is called only by client (compute) tasks.
4204 ! This is not yet supported.
4206 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4208 INTEGER , INTENT(IN) :: DataHandle
4209 CHARACTER*(*) , INTENT(IN) :: Element
4210 CHARACTER*(*) , INTENT(IN) :: DateStr
4211 logical , INTENT(IN) :: Data(*)
4212 INTEGER , INTENT(IN) :: Count
4216 END SUBROUTINE wrf_quilt_put_dom_td_logical
4218 SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
4220 ! Instruct the I/O quilt servers to attempt to read time dependent
4221 ! domain metadata named "Element" valid at time DateStr
4222 ! from the open dataset described by DataHandle.
4223 ! Metadata of type char are
4224 ! stored in string Data.
4225 ! This routine is called only by client (compute) tasks.
4227 ! This is not yet supported.
4229 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4231 INTEGER , INTENT(IN) :: DataHandle
4232 CHARACTER*(*) , INTENT(IN) :: Element
4233 CHARACTER*(*) , INTENT(IN) :: DateStr
4234 CHARACTER*(*) :: Data
4238 END SUBROUTINE wrf_quilt_get_dom_td_char
4240 SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
4242 ! Instruct the I/O quilt servers to write time dependent
4243 ! domain metadata named "Element" valid at time DateStr
4244 ! to the open dataset described by DataHandle.
4245 ! Metadata of type char are
4246 ! copied from string Data.
4247 ! This routine is called only by client (compute) tasks.
4249 ! This is not yet supported.
4251 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4253 INTEGER , INTENT(IN) :: DataHandle
4254 CHARACTER*(*) , INTENT(IN) :: Element
4255 CHARACTER*(*) , INTENT(IN) :: DateStr
4256 CHARACTER*(*) , INTENT(IN) :: Data
4260 END SUBROUTINE wrf_quilt_put_dom_td_char
4262 SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
4264 ! Instruct the I/O quilt servers to attempt to read Count words of time
4265 ! independent attribute "Element" of variable "Varname"
4266 ! from the open dataset described by DataHandle.
4267 ! Attribute of type real is
4268 ! stored in array Data.
4269 ! Actual number of words read is returned in OutCount.
4270 ! This routine is called only by client (compute) tasks.
4272 ! This is not yet supported.
4274 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4276 INTEGER , INTENT(IN) :: DataHandle
4277 CHARACTER*(*) , INTENT(IN) :: Element
4278 CHARACTER*(*) , INTENT(IN) :: VarName
4280 INTEGER , INTENT(IN) :: Count
4285 END SUBROUTINE wrf_quilt_get_var_ti_real
4287 SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element, Varname, Data, Count, Status )
4289 ! Instruct the I/O quilt servers to write Count words of time independent
4290 ! attribute "Element" of variable "Varname"
4291 ! to the open dataset described by DataHandle.
4292 ! Attribute of type real is
4293 ! copied from array Data.
4294 ! This routine is called only by client (compute) tasks.
4296 ! This is not yet supported.
4298 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4300 INTEGER , INTENT(IN) :: DataHandle
4301 CHARACTER*(*) , INTENT(IN) :: Element
4302 CHARACTER*(*) , INTENT(IN) :: VarName
4303 real , INTENT(IN) :: Data(*)
4304 INTEGER , INTENT(IN) :: Count
4308 END SUBROUTINE wrf_quilt_put_var_ti_real
4310 SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
4312 ! Instruct the I/O quilt servers to attempt to read Count words of time
4313 ! independent attribute "Element" of variable "Varname"
4314 ! from the open dataset described by DataHandle.
4315 ! Attribute of type double is
4316 ! stored in array Data.
4317 ! Actual number of words read is returned in OutCount.
4318 ! This routine is called only by client (compute) tasks.
4320 ! This is not yet supported.
4322 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4324 INTEGER , INTENT(IN) :: DataHandle
4325 CHARACTER*(*) , INTENT(IN) :: Element
4326 CHARACTER*(*) , INTENT(IN) :: VarName
4328 INTEGER , INTENT(IN) :: Count
4332 CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
4334 END SUBROUTINE wrf_quilt_get_var_ti_double
4336 SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element, Varname, Data, Count, Status )
4338 ! Instruct the I/O quilt servers to write Count words of time independent
4339 ! attribute "Element" of variable "Varname"
4340 ! to the open dataset described by DataHandle.
4341 ! Attribute of type double is
4342 ! copied from array Data.
4343 ! This routine is called only by client (compute) tasks.
4345 ! This is not yet supported.
4347 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4349 INTEGER , INTENT(IN) :: DataHandle
4350 CHARACTER*(*) , INTENT(IN) :: Element
4351 CHARACTER*(*) , INTENT(IN) :: VarName
4352 real*8 , INTENT(IN) :: Data(*)
4353 INTEGER , INTENT(IN) :: Count
4356 CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
4358 END SUBROUTINE wrf_quilt_put_var_ti_double
4360 SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
4362 ! Instruct the I/O quilt servers to attempt to read Count words of time
4363 ! independent attribute "Element" of variable "Varname"
4364 ! from the open dataset described by DataHandle.
4365 ! Attribute of type integer is
4366 ! stored in array Data.
4367 ! Actual number of words read is returned in OutCount.
4368 ! This routine is called only by client (compute) tasks.
4370 ! This is not yet supported.
4372 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4374 INTEGER , INTENT(IN) :: DataHandle
4375 CHARACTER*(*) , INTENT(IN) :: Element
4376 CHARACTER*(*) , INTENT(IN) :: VarName
4378 INTEGER , INTENT(IN) :: Count
4383 END SUBROUTINE wrf_quilt_get_var_ti_integer
4385 SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Status )
4387 ! Instruct the I/O quilt servers to write Count words of time independent
4388 ! attribute "Element" of variable "Varname"
4389 ! to the open dataset described by DataHandle.
4390 ! Attribute of type integer is
4391 ! copied from array Data.
4392 ! This routine is called only by client (compute) tasks.
4394 ! This is not yet supported.
4396 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4398 INTEGER , INTENT(IN) :: DataHandle
4399 CHARACTER*(*) , INTENT(IN) :: Element
4400 CHARACTER*(*) , INTENT(IN) :: VarName
4401 integer , INTENT(IN) :: Data(*)
4402 INTEGER , INTENT(IN) :: Count
4406 END SUBROUTINE wrf_quilt_put_var_ti_integer
4408 SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
4410 ! Instruct the I/O quilt servers to attempt to read Count words of time
4411 ! independent attribute "Element" of variable "Varname"
4412 ! from the open dataset described by DataHandle.
4413 ! Attribute of type logical is
4414 ! stored in array Data.
4415 ! Actual number of words read is returned in OutCount.
4416 ! This routine is called only by client (compute) tasks.
4418 ! This is not yet supported.
4420 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4422 INTEGER , INTENT(IN) :: DataHandle
4423 CHARACTER*(*) , INTENT(IN) :: Element
4424 CHARACTER*(*) , INTENT(IN) :: VarName
4426 INTEGER , INTENT(IN) :: Count
4431 END SUBROUTINE wrf_quilt_get_var_ti_logical
4433 SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Status )
4435 ! Instruct the I/O quilt servers to write Count words of time independent
4436 ! attribute "Element" of variable "Varname"
4437 ! to the open dataset described by DataHandle.
4438 ! Attribute of type logical is
4439 ! copied from array Data.
4440 ! This routine is called only by client (compute) tasks.
4442 ! This is not yet supported.
4444 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4446 INTEGER , INTENT(IN) :: DataHandle
4447 CHARACTER*(*) , INTENT(IN) :: Element
4448 CHARACTER*(*) , INTENT(IN) :: VarName
4449 logical , INTENT(IN) :: Data(*)
4450 INTEGER , INTENT(IN) :: Count
4454 END SUBROUTINE wrf_quilt_put_var_ti_logical
4456 SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element, Varname, Data, Status )
4458 ! Instruct the I/O quilt servers to attempt to read time independent
4459 ! attribute "Element" of variable "Varname"
4460 ! from the open dataset described by DataHandle.
4461 ! Attribute of type char is
4462 ! stored in string Data.
4463 ! This routine is called only by client (compute) tasks.
4465 ! This is not yet supported.
4467 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4469 INTEGER , INTENT(IN) :: DataHandle
4470 CHARACTER*(*) , INTENT(IN) :: Element
4471 CHARACTER*(*) , INTENT(IN) :: VarName
4472 CHARACTER*(*) :: Data
4476 END SUBROUTINE wrf_quilt_get_var_ti_char
4478 SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element, Varname, Data, Status )
4480 ! Instruct the I/O quilt servers to write time independent
4481 ! attribute "Element" of variable "Varname"
4482 ! to the open dataset described by DataHandle.
4483 ! Attribute of type char is
4484 ! copied from string Data.
4485 ! This routine is called only by client (compute) tasks.
4488 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4489 USE module_wrf_quilt
4492 #include "intio_tags.h"
4493 INTEGER , INTENT(IN) :: DataHandle
4494 CHARACTER*(*) , INTENT(IN) :: Element
4495 CHARACTER*(*) , INTENT(IN) :: VarName
4496 CHARACTER*(*) , INTENT(IN) :: Data
4498 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
4502 !!JMTIMING CALL start_timing
4503 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
4505 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
4506 IF ( int_handle_in_use( DataHandle ) ) THEN
4507 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
4509 #ifdef PNETCDF_QUILT
4510 IF ( compute_group_master(1) ) THEN
4511 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4512 DataHandle, TRIM(Element), &
4513 TRIM(VarName), TRIM(Data), int_var_ti_char )
4515 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4518 IF ( wrf_dm_on_monitor() ) THEN
4519 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4520 DataHandle, TRIM(Element), &
4521 TRIM(VarName), TRIM(Data), int_var_ti_char )
4523 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4527 iserver = get_server_id ( DataHandle )
4528 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4529 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4531 !!JMTIMING CALL start_timing
4532 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
4534 reduced(1) = hdrbufsize
4535 #ifdef PNETCDF_QUILT
4536 IF ( compute_group_master(1) ) reduced(2) = DataHandle
4538 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
4540 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
4541 !!JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
4542 ! send data to the i/o processor
4543 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
4545 hdrbuf, hdrbufsize , &
4549 !!JMTIMING  CALL end_timing("wrf_quilt_put_var_ti_char" )
4553 END SUBROUTINE wrf_quilt_put_var_ti_char
4555 SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4557 ! Instruct the I/O quilt servers to attempt to read Count words of time
4558 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4559 ! from the open dataset described by DataHandle.
4560 ! Attribute of type real is
4561 ! stored in array Data.
4562 ! Actual number of words read is returned in OutCount.
4563 ! This routine is called only by client (compute) tasks.
4565 ! This is not yet supported.
4567 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4569 INTEGER , INTENT(IN) :: DataHandle
4570 CHARACTER*(*) , INTENT(IN) :: Element
4571 CHARACTER*(*) , INTENT(IN) :: DateStr
4572 CHARACTER*(*) , INTENT(IN) :: VarName
4574 INTEGER , INTENT(IN) :: Count
4579 END SUBROUTINE wrf_quilt_get_var_td_real
4581 SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4583 ! Instruct the I/O quilt servers to write Count words of time dependent
4584 ! attribute "Element" of variable "Varname" valid at time DateStr
4585 ! to the open dataset described by DataHandle.
4586 ! Attribute of type real is
4587 ! copied from array Data.
4588 ! This routine is called only by client (compute) tasks.
4590 ! This is not yet supported.
4592 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4594 INTEGER , INTENT(IN) :: DataHandle
4595 CHARACTER*(*) , INTENT(IN) :: Element
4596 CHARACTER*(*) , INTENT(IN) :: DateStr
4597 CHARACTER*(*) , INTENT(IN) :: VarName
4598 real , INTENT(IN) :: Data(*)
4599 INTEGER , INTENT(IN) :: Count
4603 END SUBROUTINE wrf_quilt_put_var_td_real
4605 SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4607 ! Instruct the I/O quilt servers to attempt to read Count words of time
4608 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4609 ! from the open dataset described by DataHandle.
4610 ! Attribute of type double is
4611 ! stored in array Data.
4612 ! Actual number of words read is returned in OutCount.
4613 ! This routine is called only by client (compute) tasks.
4615 ! This is not yet supported.
4617 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4619 INTEGER , INTENT(IN) :: DataHandle
4620 CHARACTER*(*) , INTENT(IN) :: Element
4621 CHARACTER*(*) , INTENT(IN) :: DateStr
4622 CHARACTER*(*) , INTENT(IN) :: VarName
4624 INTEGER , INTENT(IN) :: Count
4628 CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
4630 END SUBROUTINE wrf_quilt_get_var_td_double
4632 SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4634 ! Instruct the I/O quilt servers to write Count words of time dependent
4635 ! attribute "Element" of variable "Varname" valid at time DateStr
4636 ! to the open dataset described by DataHandle.
4637 ! Attribute of type double is
4638 ! copied from array Data.
4639 ! This routine is called only by client (compute) tasks.
4641 ! This is not yet supported.
4643 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4645 INTEGER , INTENT(IN) :: DataHandle
4646 CHARACTER*(*) , INTENT(IN) :: Element
4647 CHARACTER*(*) , INTENT(IN) :: DateStr
4648 CHARACTER*(*) , INTENT(IN) :: VarName
4649 real*8 , INTENT(IN) :: Data(*)
4650 INTEGER , INTENT(IN) :: Count
4653 CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
4655 END SUBROUTINE wrf_quilt_put_var_td_double
4657 SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount,Status)
4659 ! Instruct the I/O quilt servers to attempt to read Count words of time
4660 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4661 ! from the open dataset described by DataHandle.
4662 ! Attribute of type integer is
4663 ! stored in array Data.
4664 ! Actual number of words read is returned in OutCount.
4665 ! This routine is called only by client (compute) tasks.
4667 ! This is not yet supported.
4669 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4671 INTEGER , INTENT(IN) :: DataHandle
4672 CHARACTER*(*) , INTENT(IN) :: Element
4673 CHARACTER*(*) , INTENT(IN) :: DateStr
4674 CHARACTER*(*) , INTENT(IN) :: VarName
4676 INTEGER , INTENT(IN) :: Count
4681 END SUBROUTINE wrf_quilt_get_var_td_integer
4683 SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4685 ! Instruct the I/O quilt servers to write Count words of time dependent
4686 ! attribute "Element" of variable "Varname" valid at time DateStr
4687 ! to the open dataset described by DataHandle.
4688 ! Attribute of type integer is
4689 ! copied from array Data.
4690 ! This routine is called only by client (compute) tasks.
4692 ! This is not yet supported.
4694 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4696 INTEGER , INTENT(IN) :: DataHandle
4697 CHARACTER*(*) , INTENT(IN) :: Element
4698 CHARACTER*(*) , INTENT(IN) :: DateStr
4699 CHARACTER*(*) , INTENT(IN) :: VarName
4700 integer , INTENT(IN) :: Data(*)
4701 INTEGER , INTENT(IN) :: Count
4705 END SUBROUTINE wrf_quilt_put_var_td_integer
4707 SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
4709 ! Instruct the I/O quilt servers to attempt to read Count words of time
4710 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
4711 ! from the open dataset described by DataHandle.
4712 ! Attribute of type logical is
4713 ! stored in array Data.
4714 ! Actual number of words read is returned in OutCount.
4715 ! This routine is called only by client (compute) tasks.
4717 ! This is not yet supported.
4719 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4721 INTEGER , INTENT(IN) :: DataHandle
4722 CHARACTER*(*) , INTENT(IN) :: Element
4723 CHARACTER*(*) , INTENT(IN) :: DateStr
4724 CHARACTER*(*) , INTENT(IN) :: VarName
4726 INTEGER , INTENT(IN) :: Count
4731 END SUBROUTINE wrf_quilt_get_var_td_logical
4733 SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
4735 ! Instruct the I/O quilt servers to write Count words of time dependent
4736 ! attribute "Element" of variable "Varname" valid at time DateStr
4737 ! to the open dataset described by DataHandle.
4738 ! Attribute of type logical is
4739 ! copied from array Data.
4740 ! This routine is called only by client (compute) tasks.
4742 ! This is not yet supported.
4744 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4746 INTEGER , INTENT(IN) :: DataHandle
4747 CHARACTER*(*) , INTENT(IN) :: Element
4748 CHARACTER*(*) , INTENT(IN) :: DateStr
4749 CHARACTER*(*) , INTENT(IN) :: VarName
4750 logical , INTENT(IN) :: Data(*)
4751 INTEGER , INTENT(IN) :: Count
4755 END SUBROUTINE wrf_quilt_put_var_td_logical
4757 SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
4759 ! Instruct the I/O quilt servers to attempt to read time dependent
4760 ! attribute "Element" of variable "Varname" valid at time DateStr
4761 ! from the open dataset described by DataHandle.
4762 ! Attribute of type char is
4763 ! stored in string Data.
4764 ! This routine is called only by client (compute) tasks.
4766 ! This is not yet supported.
4768 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4770 INTEGER , INTENT(IN) :: DataHandle
4771 CHARACTER*(*) , INTENT(IN) :: Element
4772 CHARACTER*(*) , INTENT(IN) :: DateStr
4773 CHARACTER*(*) , INTENT(IN) :: VarName
4774 CHARACTER*(*) :: Data
4778 END SUBROUTINE wrf_quilt_get_var_td_char
4780 SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
4782 ! Instruct the I/O quilt servers to write time dependent
4783 ! attribute "Element" of variable "Varname" valid at time DateStr
4784 ! to the open dataset described by DataHandle.
4785 ! Attribute of type char is
4786 ! copied from string Data.
4787 ! This routine is called only by client (compute) tasks.
4789 ! This is not yet supported.
4791 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4793 INTEGER , INTENT(IN) :: DataHandle
4794 CHARACTER*(*) , INTENT(IN) :: Element
4795 CHARACTER*(*) , INTENT(IN) :: DateStr
4796 CHARACTER*(*) , INTENT(IN) :: VarName
4797 CHARACTER*(*) , INTENT(IN) :: Data
4801 END SUBROUTINE wrf_quilt_put_var_td_char
4803 SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4804 DomainDesc , MemoryOrder , Stagger , DimNames , &
4805 DomainStart , DomainEnd , &
4806 MemoryStart , MemoryEnd , &
4807 PatchStart , PatchEnd , &
4810 ! Instruct the I/O quilt servers to read the variable named VarName from the
4811 ! dataset pointed to by DataHandle.
4812 ! This routine is called only by client (compute) tasks.
4814 ! This is not yet supported.
4816 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4818 INTEGER , INTENT(IN) :: DataHandle
4819 CHARACTER*(*) , INTENT(INOUT) :: DateStr
4820 CHARACTER*(*) , INTENT(INOUT) :: VarName
4821 INTEGER , INTENT(INOUT) :: Field(*)
4822 integer ,intent(in) :: FieldType
4823 integer ,intent(inout) :: Comm
4824 integer ,intent(inout) :: IOComm
4825 integer ,intent(in) :: DomainDesc
4826 character*(*) ,intent(in) :: MemoryOrder
4827 character*(*) ,intent(in) :: Stagger
4828 character*(*) , dimension (*) ,intent(in) :: DimNames
4829 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
4830 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
4831 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
4832 integer ,intent(out) :: Status
4836 END SUBROUTINE wrf_quilt_read_field
4838 SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4839 DomainDesc , MemoryOrder , Stagger , DimNames , &
4840 DomainStart , DomainEnd , &
4841 MemoryStart , MemoryEnd , &
4842 PatchStart , PatchEnd , &
4845 ! Prepare instructions for the I/O quilt servers to write the variable named
4846 ! VarName to the dataset pointed to by DataHandle.
4848 ! During a "training" write this routine accumulates the number and sizes of
4849 ! messages that will be sent to the I/O server associated with this compute task.
4852 ! During a "real" write, this routine begins by allocating
4853 ! int_local_output_buffer if it has not already been allocated. Sizes
4854 ! accumulated during "training" are used to determine how big
4855 ! int_local_output_buffer must be. This routine then stores "int_field"
4856 ! headers and associated field data in int_local_output_buffer. The contents
4857 ! of int_local_output_buffer are actually sent to the I/O quilt server in
4858 ! routine wrf_quilt_iosync(). This scheme allows output of multiple variables
4859 ! to be aggregated into a single "iosync" operation.
4860 ! This routine is called only by client (compute) tasks.
4862 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
4863 USE module_state_description
4864 USE module_wrf_quilt
4867 #include "wrf_io_flags.h"
4868 INTEGER , INTENT(IN) :: DataHandle
4869 CHARACTER*(*) , INTENT(IN) :: DateStr
4870 CHARACTER*(*) , INTENT(IN) :: VarName
4871 ! INTEGER , INTENT(IN) :: Field(*)
4872 integer ,intent(in) :: FieldType
4873 integer ,intent(inout) :: Comm
4874 integer ,intent(inout) :: IOComm
4875 integer ,intent(in) :: DomainDesc
4876 character*(*) ,intent(in) :: MemoryOrder
4877 character*(*) ,intent(in) :: Stagger
4878 character*(*) , dimension (*) ,intent(in) :: DimNames
4879 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
4880 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
4881 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
4882 integer ,intent(out) :: Status
4884 integer ii,jj,kk,myrank
4886 REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
4887 MemoryStart(2):MemoryEnd(2), &
4888 MemoryStart(3):MemoryEnd(3) ) :: Field
4889 INTEGER locsize , typesize, itypesize
4890 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
4891 INTEGER, EXTERNAL :: use_package
4893 !!ARPTIMING CALL start_timing
4894 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
4896 IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
4897 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
4899 IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
4900 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
4903 locsize = (PatchEnd(1)-PatchStart(1)+1)* &
4904 (PatchEnd(2)-PatchStart(2)+1)* &
4905 (PatchEnd(3)-PatchStart(3)+1)
4907 CALL mpi_type_size( MPI_INTEGER, itypesize, ierr )
4908 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
4909 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
4910 IF ( FieldType .EQ. WRF_DOUBLE ) THEN
4911 CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
4912 ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
4913 CALL mpi_type_size( MPI_REAL, typesize, ierr )
4914 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
4915 CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
4916 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
4917 CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
4920 IF ( .NOT. okay_to_write( DataHandle ) ) THEN
4922 ! This is a "training" write.
4923 ! it is not okay to actually write; what we do here is just "bookkeep": count up
4924 ! the number and size of messages that we will output to the I/O server associated with this compute task.
4927 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
4928 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4929 333933 , MemoryOrder , Stagger , DimNames , & ! 333933 means training; magic number
4930 DomainStart , DomainEnd , &
4931 MemoryStart , MemoryEnd , &
4932 PatchStart , PatchEnd )
4934 int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
4936 ! Send the hdr for the write in case the interface is calling the I/O API in "learn" mode
4938 iserver = get_server_id ( DataHandle )
4939 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
4940 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4941 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
4943 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4946 IF ( .NOT. wrf_dm_on_monitor() ) THEN ! only one task in compute grid sends this message; send noops on others
4947 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4952 !!ARPTIMING CALL start_timing
4953 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
4955 reduced(1) = hdrbufsize
4956 #ifdef PNETCDF_QUILT
4957 IF ( compute_group_master(1) ) reduced(2) = DataHandle
4959 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
4961 CALL mpi_x_reduce( reduced, reduced_dummy, 2, MPI_INTEGER, MPI_SUM, tasks_in_group-1, comm_io_group, ierr )
4962 !!ARPTIMING CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
4963 ! send data to the i/o processor
4965 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
4967 hdrbuf, hdrbufsize , &
4972 IF ( .NOT. associated( int_local_output_buffer ) ) THEN
4973 ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/itypesize ), Stat=ierr )
4975 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: allocate of int_local_output_buffer failed" )
4977 int_local_output_cursor = 1
4979 iserver = get_server_id ( DataHandle )
4980 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
4982 ! This is NOT a "training" write. It is OK to write now.
4983 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
4984 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4985 0 , MemoryOrder , Stagger , DimNames , & ! non-333933 means okay to write; magic number
4986 DomainStart , DomainEnd , &
4987 MemoryStart , MemoryEnd , &
4988 PatchStart , PatchEnd )
4990 ! Pack header into int_local_output_buffer. It will be sent to the
4991 ! I/O servers during the next "iosync" operation.
4993 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
4995 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
4998 ! Pack field data into int_local_output_buffer. It will be sent to the
4999 ! I/O servers during the next "iosync" operation.
5001 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
5002 locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
5004 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
5005 locsize * typesize , int_local_output_buffer, int_local_output_cursor )
5010 !!ARPTIMING CALL end_timing("wrf_quilt_write_field")
5014 END SUBROUTINE wrf_quilt_write_field
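! --------------------------------------------------------------------------
! The "training" / "real write" scheme described at the top of
! wrf_quilt_write_field boils down to a two-pass packing pattern: pass 1 only
! accumulates sizes, pass 2 allocates one buffer of exactly that size and
! appends records behind a cursor (int_local_output_buffer and
! int_local_output_cursor above). The routine below is a standalone,
! simplified sketch of that pattern in plain Fortran; the name
! two_pass_pack_sketch and its arguments are illustrative only.
SUBROUTINE two_pass_pack_sketch( nrec, recsize, records )
  IMPLICIT NONE
  INTEGER, INTENT(IN) :: nrec           ! number of records to pack
  INTEGER, INTENT(IN) :: recsize(nrec)  ! size of each record, in integers
  INTEGER, INTENT(IN) :: records(*)     ! all record payloads, back to back
  INTEGER, ALLOCATABLE :: buffer(:)
  INTEGER :: total, cursor, src, n, ierr
  ! Pass 1 ("training"): no data is moved, only the total size is accumulated.
  total = 0
  DO n = 1, nrec
    total = total + recsize(n)
  ENDDO
  ! Pass 2 ("real write"): allocate once, then append each record at the cursor.
  ALLOCATE( buffer(total), STAT=ierr )
  IF ( ierr .NE. 0 ) RETURN
  cursor = 1
  src    = 1
  DO n = 1, nrec
    buffer(cursor:cursor+recsize(n)-1) = records(src:src+recsize(n)-1)
    cursor = cursor + recsize(n)
    src    = src + recsize(n)
  ENDDO
  DEALLOCATE( buffer )   ! in WRF the buffer is instead flushed by wrf_quilt_iosync
END SUBROUTINE two_pass_pack_sketch
! --------------------------------------------------------------------------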
5016 SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
5017 DomainStart , DomainEnd , Status )
5019 ! This routine applies only to a dataset that is open for read. It instructs
5020 ! the I/O quilt servers to return information about variable VarName.
5021 ! This routine is called only by client (compute) tasks.
5023 ! This is not yet supported.
5025 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
5027 integer ,intent(in) :: DataHandle
5028 character*(*) ,intent(in) :: VarName
5030 character*(*) :: MemoryOrder
5031 character*(*) :: Stagger
5032 integer ,dimension(*) :: DomainStart, DomainEnd
5036 END SUBROUTINE wrf_quilt_get_var_info
5038 subroutine wrf_quilt_find_server(iserver)
5040 ! This routine is called by the compute processes when they need an
5041 ! I/O server to write out a new file. Upon return, this routine will
5042 ! set iserver to the next available I/O server group.
5044 ! An mpi_recv on mpi_comm_avail (from MPI_ANY_SOURCE) is used to implement this, and
5045 ! that recv will not return until an I/O server group calls
5046 ! wrf_quilt_server_ready to signal that it is ready for a new file.
5048 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
5049 use module_wrf_quilt, only : in_avail, mpi_comm_avail, mpi_comm_local
5053 integer, intent(inout) :: iserver
5055 character(255) :: message
5057 call wrf_message('Polling I/O servers...')
5060 call mpi_recv(iserver,1,MPI_INTEGER,MPI_ANY_SOURCE,0,mpi_comm_avail,MPI_STATUS_IGNORE,ierr)
5062 call wrf_error_fatal('mpi_recv failed in wrf_quilt_find_server')
5066 call mpi_bcast(iserver,1,MPI_INTEGER,0,mpi_comm_local,ierr)
5068 call wrf_error_fatal('mpi_bcast failed in wrf_quilt_find_server')
5071 write(message,'("I/O server ",I0," is ready for operations.")') iserver
5072 call wrf_message(message)
5076 end subroutine wrf_quilt_find_server
5077 subroutine wrf_quilt_server_ready()
5079 ! This routine is called by the I/O server group's master process once the
5080 ! I/O server group is done writing its current file, and is waiting for
5081 ! a new one. This information is passed to the monitor process by a
5082 ! blocking send from the I/O server master process to the monitor.
5084 ! All processes in an I/O group must call this routine, and this routine
5085 ! will not return (in any process) until the monitor process signals
5086 ! that it wants the I/O server group to write a file. That signal is
5087 ! sent in a call to wrf_quilt_find_server on the compute processes.
5089 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
5090 use module_wrf_quilt, only : mpi_comm_local, in_avail, availrank, mpi_comm_avail
5095 character*255 :: message
5097 write(message,*) 'Entering wrf_quilt_server_ready.'
5098 call wrf_debug(1,message)
5100 call mpi_barrier(mpi_comm_local,ierr)
5102 call wrf_error_fatal('mpi_barrier failed in wrf_quilt_server_ready')
5106 write(message,'("mpi_ssend ioserver=",I0," in wrf_quilt_server_ready")') availrank
5107 call wrf_debug(1,message)
5108 call mpi_ssend(availrank,1,MPI_INTEGER,0,0,mpi_comm_avail,ierr)
5110 call wrf_error_fatal('mpi_ssend failed in wrf_quilt_server_ready')
5114 call mpi_barrier(mpi_comm_local,ierr)
5116 call wrf_error_fatal('mpi_barrier failed in wrf_quilt_server_ready')
5119 write(message,*) 'Leaving wrf_quilt_server_ready.'
5120 call wrf_debug(1,message)
5123 end subroutine wrf_quilt_server_ready
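#if defined( DM_PARALLEL ) && ! defined( STUBMPI )
! --------------------------------------------------------------------------
! wrf_quilt_find_server and wrf_quilt_server_ready together form a small
! rendezvous: each I/O server master announces itself with a blocking
! MPI_Ssend on mpi_comm_avail, the monitor matches it with an MPI_Recv from
! MPI_ANY_SOURCE, and the chosen server id is then broadcast to the other
! compute tasks. The routine below is a self-contained sketch of that
! rendezvous on an arbitrary communicator, assuming exactly one task (the
! monitor, rank 0) calls it with i_am_server_master = .FALSE.; the routine
! name and its arguments are illustrative, not WRF symbols.
SUBROUTINE server_rendezvous_sketch( comm, i_am_server_master, my_id, chosen )
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER, INTENT(IN)  :: comm                ! monitor plus all I/O server masters
  LOGICAL, INTENT(IN)  :: i_am_server_master  ! .TRUE. on a server-side caller
  INTEGER, INTENT(IN)  :: my_id               ! this server's id (server side only)
  INTEGER, INTENT(OUT) :: chosen              ! id of the next available server (monitor side)
  INTEGER :: ierr
  chosen = -1
  IF ( i_am_server_master ) THEN
    ! Server side: block until the monitor has consumed our "ready" message.
    CALL MPI_Ssend( my_id, 1, MPI_INTEGER, 0, 0, comm, ierr )
  ELSE
    ! Monitor side: accept whichever server announces itself first.
    CALL MPI_Recv( chosen, 1, MPI_INTEGER, MPI_ANY_SOURCE, 0, comm, &
                   MPI_STATUS_IGNORE, ierr )
  ENDIF
END SUBROUTINE server_rendezvous_sketch
! --------------------------------------------------------------------------
#endif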
5125 SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
5127 ! This routine returns the compute+io communicator to which this
5128 ! compute task belongs for I/O server group "isrvr".
5129 ! This routine is called only by client (compute) tasks.
5131 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
5132 USE module_wrf_quilt
5134 INTEGER, INTENT(IN ) :: isrvr
5135 INTEGER, INTENT(OUT) :: retval
5136 retval = mpi_comm_io_groups(isrvr)
5139 END SUBROUTINE get_mpi_comm_io_groups
5141 SUBROUTINE get_nio_tasks_in_group( id, retval )
5143 ! This routine returns the number of I/O server tasks in each
5144 ! I/O server group. It can be called by both clients and servers.
5147 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
5148 USE module_wrf_quilt
5150 INTEGER, INTENT(IN) :: id
5151 INTEGER, INTENT(OUT) :: retval
5152 retval = nio_tasks_in_group
5155 END SUBROUTINE get_nio_tasks_in_group
5157 SUBROUTINE collect_on_comm_debug(file,line, comm_io_group, &
5159 hdrbuf, hdrbufsize , &
5160 outbuf, outbufsize )
5164 INTEGER comm_io_group
5166 INTEGER hdrbuf(*), outbuf(*)
5167 INTEGER hdrbufsize, outbufsize
5169 CALL collect_on_comm( comm_io_group, &
5171 hdrbuf, hdrbufsize , &
5172 outbuf, outbufsize )
5177 SUBROUTINE collect_on_comm_debug2(file,line,var,tag,sz,hdr_rec_size, &
5180 hdrbuf, hdrbufsize , &
5181 outbuf, outbufsize )
5183 CHARACTER*(*) file,var
5184 INTEGER line,tag,sz,hdr_rec_size
5185 INTEGER comm_io_group
5187 INTEGER hdrbuf(*), outbuf(*)
5188 INTEGER hdrbufsize, outbufsize
5190 CALL collect_on_comm( comm_io_group, &
5192 hdrbuf, hdrbufsize , &
5193 outbuf, outbufsize )