2 ! Author(s)/Contact(s):
7 ! Parameters: <Specify typical arguments passed>
9 ! <list file names and briefly describe the data they include>
11 ! <list file names and briefly describe the information they include>
14 ! <list exit conditions or error codes returned>
15 ! If appropriate, descriptive troubleshooting instructions or
16 ! likely causes for failures could be mentioned here with the
17 ! appropriate error code
19 ! User controllable options: <if applicable>
21 !#### This is a module for the parallel land model.
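! Illustrative usage sketch (not part of this module; variable names are
! placeholders): a driver typically initializes the decomposition, exchanges
! halos on a local field, and gathers it back to the IO rank, e.g.
!   call MPP_LAND_INIT(nx_global, ny_global)
!   call MPP_LAND_PAR_INI(1, nx_global, ny_global, AGGFACTRT) ! nx/ny return as local sizes
!   call MPP_LAND_COM_REAL(local_field, local_nx, local_ny, 99) ! 99 = replace halo cells
!   call write_IO_real(local_field, global_field) ! gather onto the IO rank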
22 MODULE MODULE_MPP_LAND
26 use iso_fortran_env, only: int64
29 !integer, public :: HYDRO_COMM_WORLD ! communicator for WRF-Hydro - moved to MODULE_CPL_LAND
30 integer, public :: left_id,right_id,up_id,down_id,my_id
31 integer, public :: left_right_np,up_down_np ! total number of processes in each of the two dimensions.
32 integer, public :: left_right_p ,up_down_p ! position of the current process in the logical topology.
33 integer, public :: IO_id ! rank used for IO. (Last processor for IO)
34 integer, public :: global_nx, global_ny, local_nx,local_ny
35 integer, public :: global_rt_nx, global_rt_ny
36 integer, public :: local_rt_nx,local_rt_ny,rt_AGGFACTRT
37 integer, public :: numprocs ! total number of processes, obtained from MPI initialization.
38 integer :: local_startx, local_starty
39 integer :: local_startx_rt, local_starty_rt, local_endx_rt, local_endy_rt
41 integer mpp_status(MPI_STATUS_SIZE)
44 integer, allocatable, DIMENSION(:), public :: local_nx_size,local_ny_size
45 integer, allocatable, DIMENSION(:), public :: local_rt_nx_size,local_rt_ny_size
46 integer, allocatable, DIMENSION(:), public :: startx,starty
47 integer, allocatable, DIMENSION(:), public :: mpp_nlinks
49 !dwj offset vectors and size vectors for scatterv and gatherv
50 integer, allocatable, dimension(:), public :: offset_vectors, offset_vectors_rt
51 integer, allocatable, dimension(:), public :: size_vectors, size_vectors_rt
54 module procedure check_landreal1
55 module procedure check_landreal1d
56 module procedure check_landreal2d
57 module procedure check_landreal3d
60 interface write_io_land
61 module procedure write_io_real3d
64 interface mpp_land_bcast
65 module procedure mpp_land_bcast_real2
66 module procedure mpp_land_bcast_real_1d
67 module procedure mpp_land_bcast_real8_1d
68 module procedure mpp_land_bcast_real1
69 module procedure mpp_land_bcast_real1_double
70 module procedure mpp_land_bcast_char1d
71 module procedure mpp_land_bcast_char1
72 module procedure mpp_land_bcast_int1
73 module procedure mpp_land_bcast_int1d
74 module procedure mpp_land_bcast_int2d
75 module procedure mpp_land_bcast_logical
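! Note: mpp_land_bcast is a generic interface; the specific routine is chosen
! from the argument's type and rank, e.g. (names below are placeholders)
!   call mpp_land_bcast(nsoil)      ! scalar integer -> mpp_land_bcast_int1
!   call mpp_land_bcast(zsoil1d)    ! 1-d real array -> mpp_land_bcast_real_1d
! All variants broadcast from IO_id over HYDRO_COMM_WORLD.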
80 subroutine LOG_MAP2d()
83 integer, dimension(0:1) :: dims, coords
85 logical cyclic(0:1), reorder
86 data cyclic/.false.,.false./ ! not cyclic
89 call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
90 call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
92 call getNX_NY(numprocs, left_right_np,up_down_np)
93 if(my_id.eq.IO_id) then
96 write(6,*) "total processes:",numprocs
97 write(6,*) "left_right_np =", left_right_np,&
98 "up_down_np=",up_down_np
102 ! ### get the row and column of the current process in the logical topology.
103 ! ### left --> right: 0 --> left_right_np-1
104 ! ### up --> down:  0 --> up_down_np-1
105 left_right_p = mod(my_id , left_right_np)
106 up_down_p = my_id / left_right_np
108 ! ### get the neighbors. -1 means no neighbor.
109 down_id = my_id - left_right_np
110 up_id = my_id + left_right_np
111 if( up_down_p .eq. 0) down_id = -1
112 if( up_down_p .eq. (up_down_np-1) ) up_id = -1
116 if( left_right_p .eq. 0) left_id = -1
117 if( left_right_p .eq. (left_right_np-1) ) right_id =-1
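! Illustrative mapping: with numprocs = 6, left_right_np = 3 and up_down_np = 2,
! rank my_id = 4 gets left_right_p = mod(4,3) = 1 and up_down_p = 4/3 = 1, so
! down_id = 1 and up_id = 4+3 = 7, which is reset to -1 because the rank sits in
! the last row (left_id/right_id, assigned in the elided lines, become 3 and 5).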
119 ! ### the IO node is the last processor.
120 !yw IO_id = numprocs - 1
123 ! print the information for debug.
125 ! BF setup virtual cartesian grid topology
128 dims(0) = up_down_np ! rows
129 dims(1) = left_right_np ! columns
131 call MPI_Cart_create(HYDRO_COMM_WORLD, ndim, dims, &
132 cyclic, reorder, cartGridComm, ierr)
134 call MPI_CART_GET(cartGridComm, 2, dims, cyclic, coords, ierr)
136 p_up_down = coords(0)
137 p_left_right = coords(1)
138 np_up_down = up_down_np
139 np_left_right = left_right_np
142 end subroutine log_map2d
144 subroutine MPP_LAND_INIT(in_global_nx,in_global_ny)
145 ! ### initialize the land model's logical 2-D domain decomposition.
146 ! ### Call this subroutine directly when the model is nested within WRF.
148 integer, optional :: in_global_nx, in_global_ny
149 integer :: ierr, provided
152 if (present(in_global_nx) .and. present(in_global_ny)) then
153 global_nx = in_global_nx
154 global_ny = in_global_ny
157 call mpi_initialized( mpi_inited, ierr )
158 if ( .not. mpi_inited ) then
159 call MPI_INIT_THREAD( MPI_THREAD_FUNNELED, provided, ierr )
160 if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_INIT failed")
161 call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr)
162 if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_DUP failed")
165 call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
166 call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
167 if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_RANK and/or MPI_COMM_SIZE failed")
169 ! create 2d logical mapping of the CPU.
172 end subroutine MPP_LAND_INIT
175 subroutine MPP_LAND_PAR_INI(over_lap,in_global_nx,in_global_ny,AGGFACTRT)
176 integer in_global_nx,in_global_ny, AGGFACTRT
177 integer :: over_lap ! the number of overlapping (halo) grid cells (default is 1)
180 global_nx = in_global_nx
181 global_ny = in_global_ny
182 rt_AGGFACTRT = AGGFACTRT
183 global_rt_nx = in_global_nx*AGGFACTRT
184 global_rt_ny = in_global_ny *AGGFACTRT
186 !ywold local_nx = global_nx / left_right_np
187 !ywold if(left_right_p .eq. (left_right_np-1) ) then
188 !ywold local_nx = global_nx &
189 !ywold -int(global_nx/left_right_np)*(left_right_np-1)
191 !ywold local_ny = global_ny / up_down_np
192 !ywold if( up_down_p .eq. (up_down_np-1) ) then
193 !ywold local_ny = global_ny &
194 !ywold -int(global_ny/up_down_np)*(up_down_np -1)
197 local_nx = int(global_nx / left_right_np)
198 !if(global_nx .ne. (local_nx*left_right_np) ) then
199 if(mod(global_nx, left_right_np) .ne. 0) then
200 do i = 1, mod(global_nx, left_right_np)
201 if(left_right_p .eq. i ) then
202 local_nx = local_nx + 1
207 local_ny = int(global_ny / up_down_np)
208 !if(global_ny .ne. (local_ny * up_down_np) ) then
209 if(mod(global_ny,up_down_np) .ne. 0 ) then
210 do i = 1, mod(global_ny,up_down_np)
211 if( up_down_p .eq. i) then
212 local_ny = local_ny + 1
217 local_rt_nx=local_nx*AGGFACTRT+2
218 local_rt_ny=local_ny*AGGFACTRT+2
219 if(left_id.lt.0) local_rt_nx = local_rt_nx -1
220 if(right_id.lt.0) local_rt_nx = local_rt_nx -1
221 if(up_id.lt.0) local_rt_ny = local_rt_ny -1
222 if(down_id.lt.0) local_rt_ny = local_rt_ny -1
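! Illustrative sizing: with AGGFACTRT = 4 and local_nx = 10 the routing tile is
! 10*4 + 2 = 42 cells wide (one halo column on each side); a tile on a domain
! edge drops the halo on that side, e.g. 41 cells wide when left_id < 0.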
224 call get_local_size(local_nx, local_ny,local_rt_nx,local_rt_ny)
225 call calculate_start_p()
226 call calculate_offset_vectors()
228 in_global_nx = local_nx
229 in_global_ny = local_ny
231 write(6,*) "my_id=",my_id,"global_rt_nx=",global_rt_nx
232 write(6,*) "my_id=",my_id,"global_rt_ny=",global_rt_ny
233 write(6,*) "my_id=",my_id,"global_nx=",global_nx
234 write(6,*) "my_id=",my_id,"global_ny=",global_ny
237 end subroutine MPP_LAND_PAR_INI
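! Note: on return from MPP_LAND_PAR_INI the in_global_nx/in_global_ny arguments
! have been overwritten with the local tile sizes, so callers that still need
! the global dimensions should keep separate copies.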
239 subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
240 ! ### Communicate messages in the left-right direction.
242 real in_out_data(nx,ny),data_r(2,ny)
243 integer count,size,tag, ierr
244 integer flag ! 99: replace the boundary; otherwise: sum the boundary values.
246 if(flag .eq. 99) then ! replace the data
247 if(right_id .ge. 0) then ! ### send to right first.
250 call mpi_send(in_out_data(nx-1,:),size,MPI_REAL, &
251 right_id,tag,HYDRO_COMM_WORLD,ierr)
253 if(left_id .ge. 0) then ! receive from left
256 call mpi_recv(in_out_data(1,:),size,MPI_REAL, &
257 left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
260 if(left_id .ge. 0 ) then ! ### send to left second.
263 call mpi_send(in_out_data(2,:),size,MPI_REAL, &
264 left_id,tag,HYDRO_COMM_WORLD,ierr)
266 if(right_id .ge. 0) then ! receive from right
269 call mpi_recv(in_out_data(nx,:),size,MPI_REAL,&
270 right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
275 if(right_id .ge. 0) then ! ### send to right first.
278 call mpi_send(in_out_data(nx-1:nx,:),size,MPI_REAL, &
279 right_id,tag,HYDRO_COMM_WORLD,ierr)
281 if(left_id .ge. 0) then ! receive from left
284 call mpi_recv(data_r,size,MPI_REAL,left_id,tag, &
285 HYDRO_COMM_WORLD,mpp_status,ierr)
286 in_out_data(1,:) = in_out_data(1,:) + data_r(1,:)
287 in_out_data(2,:) = in_out_data(2,:) + data_r(2,:)
290 if(left_id .ge. 0 ) then ! ### send to left second.
293 call mpi_send(in_out_data(1:2,:),size,MPI_REAL, &
294 left_id,tag,HYDRO_COMM_WORLD,ierr)
296 if(right_id .ge. 0) then ! receive from right
299 call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_REAL,&
300 right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
302 endif ! end of the flag block.
305 end subroutine MPP_LAND_LR_COM
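! Note on the exchange above: with flag = 99 each rank sends interior column
! nx-1 to its right neighbour (stored there in halo column 1) and interior
! column 2 to its left neighbour (stored there in halo column nx), so the
! one-cell halo on each side is replaced by the neighbour's interior values;
! for any other flag the shared two-column strip ends up holding the sum of the
! two ranks' contributions.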
307 subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag)
308 ! ### Communicate messages in the left-right direction.
310 real*8 in_out_data(nx,ny),data_r(2,ny)
311 integer count,size,tag, ierr
312 integer flag ! 99: replace the boundary; otherwise: sum the boundary values.
314 if(flag .eq. 99) then ! replace the data
315 if(right_id .ge. 0) then ! ### send to right first.
318 call mpi_send(in_out_data(nx-1,:),size,MPI_DOUBLE_PRECISION, &
319 right_id,tag,HYDRO_COMM_WORLD,ierr)
321 if(left_id .ge. 0) then ! receive from left
324 call mpi_recv(in_out_data(1,:),size,MPI_DOUBLE_PRECISION, &
325 left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
328 if(left_id .ge. 0 ) then ! ### send to left second.
331 call mpi_send(in_out_data(2,:),size,MPI_DOUBLE_PRECISION, &
332 left_id,tag,HYDRO_COMM_WORLD,ierr)
334 if(right_id .ge. 0) then ! receive from right
337 call mpi_recv(in_out_data(nx,:),size,MPI_DOUBLE_PRECISION,&
338 right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
343 if(right_id .ge. 0) then ! ### send to right first.
346 call mpi_send(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION, &
347 right_id,tag,HYDRO_COMM_WORLD,ierr)
349 if(left_id .ge. 0) then ! receive from left
352 call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION,left_id,tag, &
353 HYDRO_COMM_WORLD,mpp_status,ierr)
354 in_out_data(1,:) = in_out_data(1,:) + data_r(1,:)
355 in_out_data(2,:) = in_out_data(2,:) + data_r(2,:)
358 if(left_id .ge. 0 ) then ! ### send to left second.
361 call mpi_send(in_out_data(1:2,:),size,MPI_DOUBLE_PRECISION, &
362 left_id,tag,HYDRO_COMM_WORLD,ierr)
364 if(right_id .ge. 0) then ! receive from right
367 call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION,&
368 right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
370 endif ! end of the flag block.
373 end subroutine MPP_LAND_LR_COM8
376 subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny)
377 integer local_nx, local_ny, rt_nx,rt_ny
378 integer i,status,ierr, tag
379 integer tmp_nx,tmp_ny
380 ! ### on the IO node, gather the local x and y sizes
381 ! ### of all the other tasks.
384 ! if(my_id .eq. IO_id) then
385 if(.not. allocated(local_nx_size) ) allocate(local_nx_size(numprocs),stat = status)
386 if(.not. allocated(local_ny_size) ) allocate(local_ny_size(numprocs),stat = status)
387 if(.not. allocated(local_rt_nx_size) ) allocate(local_rt_nx_size(numprocs),stat = status)
388 if(.not. allocated(local_rt_ny_size) ) allocate(local_rt_ny_size(numprocs),stat = status)
391 if(my_id .eq. IO_id) then
392 do i = 0, numprocs - 1
393 if(i .ne. my_id) then
394 !block receive from other node.
396 call mpi_recv(s_r,2,MPI_INTEGER,i, &
397 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
398 local_nx_size(i+1) = s_r(1)
399 local_ny_size(i+1) = s_r(2)
401 local_nx_size(i+1) = local_nx
402 local_ny_size(i+1) = local_ny
409 call mpi_send(s_r,2,MPI_INTEGER, IO_id, &
410 tag,HYDRO_COMM_WORLD,ierr)
414 if(my_id .eq. IO_id) then
415 do i = 0, numprocs - 1
416 if(i .ne. my_id) then
417 !block receive from other node.
419 call mpi_recv(s_r,2,MPI_INTEGER,i, &
420 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
421 local_rt_nx_size(i+1) = s_r(1)
422 local_rt_ny_size(i+1) = s_r(2)
424 local_rt_nx_size(i+1) = rt_nx
425 local_rt_ny_size(i+1) = rt_ny
432 call mpi_send(s_r,2,MPI_INTEGER, IO_id, &
433 tag,HYDRO_COMM_WORLD,ierr)
437 end subroutine get_local_size
440 subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
441 ! ### Communicate messages in the up-down direction.
443 real in_out_data(nx,ny),data_r(nx,2)
444 integer count,size,tag, status, ierr
445 integer flag ! 99: replace the boundary; otherwise: sum the boundary values.
448 if(flag .eq. 99) then ! replace the boundary data.
450 if(up_id .ge. 0 ) then ! ### send to up first.
453 call mpi_send(in_out_data(:,ny-1),size,MPI_REAL, &
454 up_id,tag,HYDRO_COMM_WORLD,ierr)
456 if(down_id .ge. 0 ) then ! receive from down
459 call mpi_recv(in_out_data(:,1),size,MPI_REAL, &
460 down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
463 if(down_id .ge. 0 ) then ! send down.
466 call mpi_send(in_out_data(:,2),size,MPI_REAL, &
467 down_id,tag,HYDRO_COMM_WORLD,ierr)
469 if(up_id .ge. 0 ) then ! receive from upper
472 call mpi_recv(in_out_data(:,ny),size,MPI_REAL, &
473 up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
478 if(up_id .ge. 0 ) then ! ### send to up first.
481 call mpi_send(in_out_data(:,ny-1:ny),size,MPI_REAL, &
482 up_id,tag,HYDRO_COMM_WORLD,ierr)
484 if(down_id .ge. 0 ) then ! receive from down
487 call mpi_recv(data_r,size,MPI_REAL, &
488 down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
489 in_out_data(:,1) = in_out_data(:,1) + data_r(:,1)
490 in_out_data(:,2) = in_out_data(:,2) + data_r(:,2)
493 if(down_id .ge. 0 ) then ! send down.
496 call mpi_send(in_out_data(:,1:2),size,MPI_REAL, &
497 down_id,tag,HYDRO_COMM_WORLD,ierr)
499 if(up_id .ge. 0 ) then ! receive from upper
502 call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_REAL, &
503 up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
505 endif ! end of block flag
507 end subroutine MPP_LAND_UB_COM
509 subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag)
510 ! ### Communicate messages in the up-down direction.
512 real*8 in_out_data(nx,ny),data_r(nx,2)
513 integer count,size,tag, status, ierr
514 integer flag ! 99: replace the boundary; otherwise: sum the boundary values.
517 if(flag .eq. 99) then ! replace the boundary data.
519 if(up_id .ge. 0 ) then ! ### send to up first.
522 call mpi_send(in_out_data(:,ny-1),size,MPI_DOUBLE_PRECISION, &
523 up_id,tag,HYDRO_COMM_WORLD,ierr)
525 if(down_id .ge. 0 ) then ! receive from down
528 call mpi_recv(in_out_data(:,1),size,MPI_DOUBLE_PRECISION, &
529 down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
532 if(down_id .ge. 0 ) then ! send down.
535 call mpi_send(in_out_data(:,2),size,MPI_DOUBLE_PRECISION, &
536 down_id,tag,HYDRO_COMM_WORLD,ierr)
538 if(up_id .ge. 0 ) then ! receive from upper
541 call mpi_recv(in_out_data(:,ny),size,MPI_DOUBLE_PRECISION, &
542 up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
547 if(up_id .ge. 0 ) then ! ### send to up first.
550 call mpi_send(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
551 up_id,tag,HYDRO_COMM_WORLD,ierr)
553 if(down_id .ge. 0 ) then ! receive from down
556 call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION, &
557 down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr)
558 in_out_data(:,1) = in_out_data(:,1) + data_r(:,1)
559 in_out_data(:,2) = in_out_data(:,2) + data_r(:,2)
562 if(down_id .ge. 0 ) then ! send down.
565 call mpi_send(in_out_data(:,1:2),size,MPI_DOUBLE_PRECISION, &
566 down_id,tag,HYDRO_COMM_WORLD,ierr)
568 if(up_id .ge. 0 ) then ! receive from upper
571 call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, &
572 up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
574 endif ! end of block flag
576 end subroutine MPP_LAND_UB_COM8
578 subroutine calculate_start_p()
579 ! calculate startx and starty
580 integer :: i,status, ierr, tag
582 integer :: t_nx, t_ny
584 if(.not. allocated(starty) ) allocate(starty(numprocs),stat = ierr)
585 if(.not. allocated(startx) ) allocate(startx(numprocs),stat = ierr)
587 local_startx = int(global_nx/left_right_np) * left_right_p+1
588 local_starty = int(global_ny/up_down_np) * up_down_p+1
592 do i = 1, mod(global_nx,left_right_np)
593 if(left_right_p .gt. i ) then
597 local_startx = local_startx + t_nx
600 do i = 1, mod(global_ny,up_down_np)
601 if( up_down_p .gt. i) then
605 local_starty = local_starty + t_ny
608 if(left_id .lt. 0) local_startx = 1
609 if(down_id .lt. 0) local_starty = 1
612 if(my_id .eq. IO_id) then
613 startx(my_id+1) = local_startx
614 starty(my_id+1) = local_starty
617 r_s(1) = local_startx
618 r_s(2) = local_starty
620 if(my_id .eq. IO_id) then
621 do i = 0, numprocs - 1
622 ! block receive from other node.
625 call mpi_recv(r_s,2,MPI_INTEGER,i, &
626 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
633 call mpi_send(r_s,2,MPI_INTEGER, IO_id, &
634 tag,HYDRO_COMM_WORLD,ierr)
637 ! calculate the routing land start x and y
638 local_startx_rt = local_startx*rt_AGGFACTRT - (rt_AGGFACTRT-1)
639 if(local_startx_rt.gt.1) local_startx_rt=local_startx_rt - 1
640 local_starty_rt = local_starty*rt_AGGFACTRT - (rt_AGGFACTRT-1)
641 if(local_starty_rt.gt.1) local_starty_rt=local_starty_rt - 1
643 local_endx_rt = local_startx_rt + local_rt_nx -1
644 local_endy_rt = local_starty_rt + local_rt_ny -1
647 end subroutine calculate_start_p
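! Worked example: with global_nx = 10 and left_right_np = 3 the base starts are
! int(10/3)*left_right_p + 1 = 1, 4, 7; the single leftover column
! (mod(10,3) = 1) widens the tile of rank left_right_p = 1, and the shift in the
! elided loop body moves ranks further right by the number of widened tiles to
! their left, giving tiles of width 3, 4, 3 that start at columns 1, 4, 8.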
649 subroutine calculate_offset_vectors()
650 ! calculate the size and offset vectors needed by mpi_scatterv and mpi_gatherv
651 integer :: i, ierr, last_offset
653 ! first make sure the arrays have been allocated
654 if ( .not. allocated(offset_vectors) ) allocate(offset_vectors(numprocs),stat = ierr)
655 if ( .not. allocated(offset_vectors_rt) ) allocate(offset_vectors_rt(numprocs),stat = ierr)
656 if ( .not. allocated(size_vectors) ) allocate(size_vectors(numprocs),stat = ierr)
657 if ( .not. allocated(size_vectors_rt) ) allocate(size_vectors_rt(numprocs),stat = ierr)
659 ! calculate the size and offsets using local_nx_size and local_ny_size
662 size_vectors(i) = local_ny_size(i) * local_nx_size(i)
663 offset_vectors(i) = last_offset
664 last_offset = last_offset + size_vectors(i)
667 ! calculate the RT size and offsets using local_rt_nx_size and local_rt_ny_size
670 size_vectors_rt(i) = local_rt_ny_size(i) * local_rt_nx_size(i)
671 offset_vectors_rt(i) = last_offset
672 last_offset = last_offset + size_vectors_rt(i)
676 end subroutine calculate_offset_vectors
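! Illustrative values: for four ranks with local tiles of 3x4, 4x4, 3x4 and 3x4
! cells, size_vectors = (/12, 16, 12, 12/) and offset_vectors = (/0, 12, 28, 40/),
! i.e. exactly the counts and zero-based displacements that mpi_scatterv and
! mpi_gatherv expect.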
678 subroutine decompose_data_real3d (in_buff,out_buff,klevel)
681 real,dimension(:,:,:) :: in_buff,out_buff
683 call decompose_data_real(in_buff(:,k,:),out_buff(:,k,:))
685 end subroutine decompose_data_real3d
687 subroutine decompose_data_real (in_buff,out_buff)
688 ! usage: all processes call this subroutine.
689 ! the IO node distributes the data to the rest of the processes.
690 real,intent(in), dimension(:,:) :: in_buff
691 real,intent(out), dimension(:,:), volatile :: out_buff
692 real, dimension(:), allocatable :: send_buff
693 integer tag, i, ii, jj, pos, status, ierr,size
694 integer ibegin,iend,jbegin,jend
696 if(my_id .eq. IO_id) then
698 ! allocate the buffer to hold data as required by mpi_scatterv
699 ! be careful with the index range when using an array prepared for MPI in Fortran (offset_vectors holds zero-based displacements)
700 allocate(send_buff(0: (global_nx*global_ny) -1),stat = ierr)
702 ! for each sub region in the global buffer linearize the data and place it in the
703 ! correct send buffer location
706 iend = startx(i)+local_nx_size(i) -1
708 jend = starty(i)+local_ny_size(i) -1
710 !write (6,*) offset_vectors(i), offset_vectors(i) +size_vectors(i) -1, ibegin, iend, jbegin, jend
712 ! we may want to use this direct loop to avoid array temporaries
713 pos = offset_vectors(i)
716 send_buff(pos) = in_buff(ii,jj)
721 ! the commented-out reshape below is more readable, but may create array temporaries
722 ! send_buff(offset_vectors(i): offset_vectors(i) + size_vectors(i) - 1) = &
723 ! reshape(in_buff(ibegin:iend,jbegin:jend), (/size_vectors(i)/) )
726 ! send to each process size_vectors(rank+1) data elements
727 ! and store the results in out_buff
728 call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
729 out_buff, size_vectors(my_id+1), MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr)
731 ! remove the send buffer
732 deallocate(send_buff)
735 ! other processes only need to make the mpi_scatterv call
736 call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, &
737 out_buff, local_nx*local_ny, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr)
741 end subroutine decompose_data_real
744 subroutine decompose_data_int (in_buff,out_buff)
745 ! usage: all processes call this subroutine.
746 ! the IO node distributes the data to the rest of the processes.
747 integer,dimension(:,:) :: in_buff,out_buff
748 integer tag, i, status, ierr,size
749 integer ibegin,iend,jbegin,jend
752 if(my_id .eq. IO_id) then
753 do i = 0, numprocs - 1
755 iend = startx(i+1)+local_nx_size(i+1) -1
757 jend = starty(i+1)+local_ny_size(i+1) -1
758 if(my_id .eq. i) then
759 out_buff=in_buff(ibegin:iend,jbegin:jend)
761 ! send data to the rest process.
762 size = local_nx_size(i+1)*local_ny_size(i+1)
763 call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,&
764 MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr)
768 size = local_nx*local_ny
769 call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, &
770 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
773 end subroutine decompose_data_int
775 subroutine write_IO_int(in_buff,out_buff)
776 ! the IO node receives the data from the rest of the processes.
777 integer,dimension(:,:):: in_buff, out_buff
778 integer tag, i, status, ierr,size
779 integer ibegin,iend,jbegin,jend
780 if(my_id .ne. IO_id) then
781 size = local_nx*local_ny
783 call mpi_send(in_buff,size,MPI_INTEGER, IO_id, &
784 tag,HYDRO_COMM_WORLD,ierr)
786 do i = 0, numprocs - 1
788 iend = startx(i+1)+local_nx_size(i+1) -1
790 jend = starty(i+1)+local_ny_size(i+1) -1
791 if(i .eq. IO_id) then
792 out_buff(ibegin:iend,jbegin:jend) = in_buff
794 size = local_nx_size(i+1)*local_ny_size(i+1)
796 call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
797 MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
802 end subroutine write_IO_int
804 subroutine write_IO_char_head(in, out, imageHead)
806 !! For image number i (starting from 0),
807 !! this routine writes
808 !!   in(1:imageHead(i+1))  to
810 !!   out( sum(imageHead(1:i))+1 : sum(imageHead(1:i))+imageHead(i+1) )
811 !! where out lives on the IO node.
812 character(len=*), intent(in), dimension(:) :: in
813 character(len=*), intent(out), dimension(:) :: out
814 integer, intent(in), dimension(:) :: imageHead
815 integer :: tag, i, status, ierr, size
816 integer :: ibegin,iend,jbegin,jend
817 integer :: lenSize, theStart, theEnd
819 if(my_id .ne. IO_id) then
820 lenSize = imageHead(my_id+1)*len(in(1)) !! sometimes necessary for character arrays?
821 if(lenSize .eq. 0) return
822 call mpi_send(in,lenSize,MPI_CHARACTER,IO_id,tag,HYDRO_COMM_WORLD,ierr)
825 lenSize = imageHead(i+1)*len(in(1)) !! necessary?
826 if(lenSize .eq. 0) cycle
830 theStart = sum(imageHead(1:(i+1-1))) +1
832 theEnd = theStart + imageHead(i+1) -1
833 if(i .eq. IO_id) then
834 out(theStart:theEnd) = in(1:imageHead(i+1))
836 call mpi_recv(out(theStart:theEnd),lenSize,&
837 MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
841 end subroutine write_IO_char_head
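! Small example: with imageHead = (/2, 0, 3/), image 0 writes in(1:2) to
! out(1:2), image 1 contributes nothing, and image 2 writes in(1:3) to out(3:5)
! on the IO node.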
844 subroutine write_IO_real3d(in_buff,out_buff,klevel)
846 ! the IO node receives the data from the rest of the processes.
848 real,dimension(:,:,:):: in_buff, out_buff
850 call write_IO_real(in_buff(:,k,:),out_buff(:,k,:))
852 end subroutine write_IO_real3d
854 subroutine write_IO_real(in_buff,out_buff)
855 ! the IO node receives the data from the rest of the processes.
856 real,dimension(:,:):: in_buff, out_buff
857 integer tag, i, status, ierr,size
858 integer ibegin,iend,jbegin,jend
859 if(my_id .ne. IO_id) then
860 size = local_nx*local_ny
862 call mpi_send(in_buff,size,MPI_REAL, IO_id, &
863 tag,HYDRO_COMM_WORLD,ierr)
865 do i = 0, numprocs - 1
867 iend = startx(i+1)+local_nx_size(i+1) -1
869 jend = starty(i+1)+local_ny_size(i+1) -1
870 if(i .eq. IO_id) then
871 out_buff(ibegin:iend,jbegin:jend) = in_buff
873 size = local_nx_size(i+1)*local_ny_size(i+1)
875 call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
876 MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
881 end subroutine write_IO_real
883 ! subroutine write_IO_RT_real_prev(in_buff,out_buff)
884 ! ! the IO node will receive the data from the rest process.
885 ! real,dimension(:,:) :: in_buff, out_buff
886 ! integer tag, i, status, ierr,size
887 ! integer ibegin,iend,jbegin,jend
888 ! if(my_id .ne. IO_id) then
889 ! size = local_rt_nx*local_rt_ny
891 ! call mpi_send(in_buff,size,MPI_REAL, IO_id, &
892 ! tag,HYDRO_COMM_WORLD,ierr)
894 ! do i = 0, numprocs - 1
895 ! ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
896 ! if(ibegin.gt.1) ibegin=ibegin - 1
897 ! iend = ibegin + local_rt_nx_size(i+1) -1
898 ! jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
899 ! if(jbegin.gt.1) jbegin=jbegin - 1
900 ! jend = jbegin + local_rt_ny_size(i+1) -1
901 ! if(i .eq. IO_id) then
902 ! out_buff(ibegin:iend,jbegin:jend) = in_buff
904 ! size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
906 ! call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
907 ! MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
912 ! end subroutine write_IO_RT_real_prev
914 subroutine write_IO_RT_real (in_buff,out_buff)
915 ! usage: all processes call this subroutine.
916 ! the IO node receives data from the rest of the processes.
917 real,intent(in), dimension(:,:) :: in_buff
918 real,intent(inout), dimension(:,:) :: out_buff
919 real, dimension(:), allocatable :: recv_buff
920 integer tag, i, ii, jj, pos, status, ierr,size
921 integer ibegin,iend,jbegin,jend,rt_startx,rt_starty
923 if(my_id .eq. IO_id) then
925 ! allocate the buffer to hold data as required by mpi_gatherv
926 ! (this will be larger than out_buff due to halo cell overlap)
927 ! be careful with the index range when using an array prepared for MPI in Fortran (offset_vectors_rt holds zero-based displacements)
928 allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr)
930 ! receive from each process size_vectors_rt(rank+1) data elements
931 ! and store the results in recv_buff
932 call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, &
933 recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, &
934 IO_id, HYDRO_COMM_WORLD, ierr)
936 ! for each sub region in the recv buffer create a correctly shaped array
937 ! and assign it to the output buffer
939 ibegin = startx(i)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
940 if (ibegin > 1) ibegin=ibegin - 1
941 iend = ibegin + local_rt_nx_size(i) - 1
942 jbegin = starty(i)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
943 if (jbegin > 1) jbegin=jbegin - 1
944 jend = jbegin + local_rt_ny_size(i) - 1
946 ! reshape the linearized sub-region back into its 2-D block
947 out_buff(ibegin:iend,jbegin:jend) = &
948 reshape(recv_buff(offset_vectors_rt(i): offset_vectors_rt(i) + size_vectors_rt(i) - 1), &
949 (/local_rt_nx_size(i), local_rt_ny_size(i)/) )
952 ! free the receive buffer
953 deallocate(recv_buff)
956 ! other processes only need to make the mpi_gatherv call
957 call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_REAL, &
958 recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, &
959 IO_id, HYDRO_COMM_WORLD, ierr)
962 end subroutine write_IO_RT_real
964 subroutine write_IO_RT_int (in_buff,out_buff)
965 ! usage: all processes call this subroutine.
966 ! the IO node receives data from the rest of the processes.
967 integer, intent(in), dimension(:,:) :: in_buff
968 integer, intent(inout), dimension(:,:) :: out_buff
969 integer, dimension(:), allocatable :: recv_buff
970 integer tag, i, ii, jj, pos, status, ierr,size
971 integer ibegin,iend,jbegin,jend,rt_startx,rt_starty
973 if(my_id .eq. IO_id) then
975 ! allocate the buffer to hold data as required by mpi_gatherv
976 ! (this will be larger than out_buff due to halo cell overlap)
977 ! be careful with the index range when using an array prepared for MPI in Fortran (offset_vectors_rt holds zero-based displacements)
978 allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr)
980 ! receive from each process size_vectors_rt(rank+1) data elements
981 ! and store the results in recv_buff
982 call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_INTEGER, &
983 recv_buff, size_vectors_rt, offset_vectors_rt, MPI_INTEGER, &
984 IO_id, HYDRO_COMM_WORLD, ierr)
986 ! for each sub region in the recv buffer create a correctly shaped array
987 ! and assign it to the output buffer
989 ibegin = startx(i)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
990 if (ibegin > 1) ibegin=ibegin - 1
991 iend = ibegin + local_rt_nx_size(i) - 1
992 jbegin = starty(i)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
993 if (jbegin > 1) jbegin=jbegin - 1
994 jend = jbegin + local_rt_ny_size(i) - 1
996 ! reshape the linearized sub-region back into its 2-D block
997 out_buff(ibegin:iend,jbegin:jend) = &
998 reshape(recv_buff(offset_vectors_rt(i): offset_vectors_rt(i) + size_vectors_rt(i) - 1), &
999 (/local_rt_nx_size(i), local_rt_ny_size(i)/) )
1002 ! free the receive buffer
1003 deallocate(recv_buff)
1006 ! other processes only need to make the mpi_gatherv call
1007 call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_INTEGER, &
1008 recv_buff, size_vectors_rt, offset_vectors_rt, MPI_INTEGER, &
1009 IO_id, HYDRO_COMM_WORLD, ierr)
1012 end subroutine write_IO_RT_int
1014 ! subroutine write_IO_RT_int (in_buff,out_buff)
1015 ! ! the IO node will receive the data from the rest process.
1016 ! integer,intent(in),dimension(:,:) :: in_buff
1017 ! integer,intent(out),dimension(:,:) :: out_buff
1018 ! integer tag, i, status, ierr,size
1019 ! integer ibegin,iend,jbegin,jend
1020 ! if(my_id .ne. IO_id) then
1021 ! size = local_rt_nx*local_rt_ny
1023 ! call mpi_send(in_buff,size,MPI_INTEGER, IO_id, &
1024 ! tag,HYDRO_COMM_WORLD,ierr)
1026 ! do i = 0, numprocs - 1
1027 ! ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1028 ! if(ibegin.gt.1) ibegin=ibegin - 1
1029 ! iend = ibegin + local_rt_nx_size(i+1) -1
1030 ! jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1031 ! if(jbegin.gt.1) jbegin=jbegin - 1
1032 ! jend = jbegin + local_rt_ny_size(i+1) -1
1033 ! if(i .eq. IO_id) then
1034 ! out_buff(ibegin:iend,jbegin:jend) = in_buff
1036 ! size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
1038 ! call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
1039 ! MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1044 ! end subroutine write_IO_RT_int
1046 subroutine write_IO_RT_int8(in_buff,out_buff)
1047 ! the IO node receives the data from the rest of the processes.
1048 integer(kind=int64),intent(in),dimension(:,:) :: in_buff
1049 integer(kind=int64),intent(out),dimension(:,:) :: out_buff
1050 integer tag, i, status, ierr,size
1051 integer ibegin,iend,jbegin,jend
1052 if(my_id .ne. IO_id) then
1053 size = local_rt_nx*local_rt_ny
1055 call mpi_send(in_buff,size,MPI_INTEGER8, IO_id, &
1056 tag,HYDRO_COMM_WORLD,ierr)
1058 do i = 0, numprocs - 1
1059 ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1060 if(ibegin.gt.1) ibegin=ibegin - 1
1061 iend = ibegin + local_rt_nx_size(i+1) -1
1062 jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1063 if(jbegin.gt.1) jbegin=jbegin - 1
1064 jend = jbegin + local_rt_ny_size(i+1) -1
1065 if(i .eq. IO_id) then
1066 out_buff(ibegin:iend,jbegin:jend) = in_buff
1068 size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1)
1070 call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,&
1071 MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1076 end subroutine write_IO_RT_int8
1078 subroutine mpp_land_bcast_log1(inout)
1081 call mpi_bcast(inout,1,MPI_LOGICAL, &
1082 IO_id,HYDRO_COMM_WORLD,ierr)
1084 end subroutine mpp_land_bcast_log1
1087 subroutine mpp_land_bcast_int(size,inout)
1091 call mpi_bcast(inout,size,MPI_INTEGER, &
1092 IO_id,HYDRO_COMM_WORLD,ierr)
1094 end subroutine mpp_land_bcast_int
1096 subroutine mpp_land_bcast_int8(size,inout)
1098 integer(kind=int64) inout(size)
1100 call mpi_bcast(inout,size,MPI_INTEGER8, &
1101 IO_id,HYDRO_COMM_WORLD,ierr)
1103 end subroutine mpp_land_bcast_int8
1105 subroutine mpp_land_bcast_int8_1d(inout)
1107 integer(kind=int64) inout(:)
1110 call mpi_bcast(inout,len,MPI_INTEGER8, &
1111 IO_id,HYDRO_COMM_WORLD,ierr)
1113 end subroutine mpp_land_bcast_int8_1d
1115 subroutine mpp_land_bcast_int1d(inout)
1120 call mpi_bcast(inout,len,MPI_INTEGER, &
1121 IO_id,HYDRO_COMM_WORLD,ierr)
1123 end subroutine mpp_land_bcast_int1d
1125 subroutine mpp_land_bcast_int1d_root(inout, rootId)
1128 integer, intent(in) :: rootId
1131 call mpi_bcast(inout,len,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr)
1133 end subroutine mpp_land_bcast_int1d_root
1135 subroutine mpp_land_bcast_int1(inout)
1138 call mpi_bcast(inout,1,MPI_INTEGER, &
1139 IO_id,HYDRO_COMM_WORLD,ierr)
1141 end subroutine mpp_land_bcast_int1
1143 subroutine mpp_land_bcast_int1_root(inout, rootId)
1146 integer, intent(in) :: rootId
1147 call mpi_bcast(inout,1,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr)
1149 end subroutine mpp_land_bcast_int1_root
1151 subroutine mpp_land_bcast_logical(inout)
1154 call mpi_bcast(inout,1,MPI_LOGICAL, &
1155 IO_id,HYDRO_COMM_WORLD,ierr)
1157 end subroutine mpp_land_bcast_logical
1159 subroutine mpp_land_bcast_logical_root(inout, rootId)
1161 integer, intent(in) :: rootId
1163 call mpi_bcast(inout,1,MPI_LOGICAL,rootId,HYDRO_COMM_WORLD,ierr)
1165 end subroutine mpp_land_bcast_logical_root
1167 subroutine mpp_land_bcast_real1(inout)
1170 call mpi_bcast(inout,1,MPI_REAL, &
1171 IO_id,HYDRO_COMM_WORLD,ierr)
1173 end subroutine mpp_land_bcast_real1
1175 subroutine mpp_land_bcast_real1_double(inout)
1178 call mpi_bcast(inout,1,MPI_REAL8, &
1179 IO_id,HYDRO_COMM_WORLD,ierr)
1181 end subroutine mpp_land_bcast_real1_double
1183 subroutine mpp_land_bcast_real_1d(inout)
1188 call mpi_bcast(inout,len,MPI_real, &
1189 IO_id,HYDRO_COMM_WORLD,ierr)
1191 end subroutine mpp_land_bcast_real_1d
1193 subroutine mpp_land_bcast_real_1d_root(inout, rootId)
1196 integer, intent(in) :: rootId
1199 call mpi_bcast(inout,len,MPI_real,rootId,HYDRO_COMM_WORLD,ierr)
1201 end subroutine mpp_land_bcast_real_1d_root
1203 subroutine mpp_land_bcast_real8_1d(inout)
1208 call mpi_bcast(inout,len,MPI_double, &
1209 IO_id,HYDRO_COMM_WORLD,ierr)
1211 end subroutine mpp_land_bcast_real8_1d
1213 subroutine mpp_land_bcast_real(size1,inout)
1216 real , dimension(:) :: inout
1218 call mpi_bcast(inout,size1,MPI_real, &
1219 IO_id,HYDRO_COMM_WORLD,ierr)
1221 end subroutine mpp_land_bcast_real
1223 subroutine mpp_land_bcast_int2d(inout)
1224 integer length1, k,length2
1227 length1 = size(inout,1)
1228 length2 = size(inout,2)
1230 call mpi_bcast(inout(:,k),length1,MPI_INTEGER, &
1231 IO_id,HYDRO_COMM_WORLD,ierr)
1234 end subroutine mpp_land_bcast_int2d
1236 subroutine mpp_land_bcast_real2(inout)
1237 integer length1, k,length2
1240 length1 = size(inout,1)
1241 length2 = size(inout,2)
1243 call mpi_bcast(inout(:,k),length1,MPI_real, &
1244 IO_id,HYDRO_COMM_WORLD,ierr)
1247 end subroutine mpp_land_bcast_real2
1249 subroutine mpp_land_bcast_real3d(inout)
1250 integer j, k, length1, length2, length3
1253 length1 = size(inout,1)
1254 length2 = size(inout,2)
1255 length3 = size(inout,3)
1258 call mpi_bcast(inout(:,j,k), length1, MPI_real, &
1259 IO_id, HYDRO_COMM_WORLD, ierr)
1263 end subroutine mpp_land_bcast_real3d
1265 subroutine mpp_land_bcast_rd(size,inout)
1269 call mpi_bcast(inout,size,MPI_REAL8, &
1270 IO_id,HYDRO_COMM_WORLD,ierr)
1272 end subroutine mpp_land_bcast_rd
1274 subroutine mpp_land_bcast_char(size,inout)
1278 call mpi_bcast(inout,size,MPI_CHARACTER, &
1279 IO_id,HYDRO_COMM_WORLD,ierr)
1281 end subroutine mpp_land_bcast_char
1283 subroutine mpp_land_bcast_char_root(size,inout,rootId)
1286 integer, intent(in) :: rootId
1288 call mpi_bcast(inout,size,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr)
1290 end subroutine mpp_land_bcast_char_root
1292 subroutine mpp_land_bcast_char1d(inout)
1293 character(len=*) :: inout(:)
1296 lenSize = size(inout,1)*len(inout)
1297 call mpi_bcast(inout,lenSize,MPI_CHARACTER, &
1298 IO_id,HYDRO_COMM_WORLD,ierr)
1300 end subroutine mpp_land_bcast_char1d
1302 subroutine mpp_land_bcast_char1d_root(inout,rootId)
1303 character(len=*) :: inout(:)
1304 integer, intent(in) :: rootId
1307 lenSize = size(inout,1)*len(inout)
1308 call mpi_bcast(inout,lenSize,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr)
1310 end subroutine mpp_land_bcast_char1d_root
1312 subroutine mpp_land_bcast_char1(inout)
1314 character(len=*) inout
1316 len = LEN_TRIM(inout)
1317 call mpi_bcast(inout,len,MPI_CHARACTER, &
1318 IO_id,HYDRO_COMM_WORLD,ierr)
1320 end subroutine mpp_land_bcast_char1
1322 subroutine MPP_LAND_COM_REAL(in_out_data,NX,NY,flag)
1323 ! ### Communicate messages in the left-right and up-down directions.
1325 integer flag ! = 99: replace the boundary (land model only).
1326 ! = 1: sum the boundary values.
1327 real in_out_data(nx,ny)
1329 call MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
1330 call MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
1333 end subroutine MPP_LAND_COM_REAL
1335 subroutine MPP_LAND_COM_REAL8(in_out_data,NX,NY,flag)
1336 ! ### Communicate messages in the left-right and up-down directions.
1338 integer flag ! = 99: replace the boundary (land model only).
1339 ! = 1: sum the boundary values.
1340 real*8 in_out_data(nx,ny)
1342 call MPP_LAND_LR_COM8(in_out_data,NX,NY,flag)
1343 call MPP_LAND_UB_COM8(in_out_data,NX,NY,flag)
1346 end subroutine MPP_LAND_COM_REAL8
1348 subroutine MPP_LAND_COM_INTEGER(data,NX,NY,flag)
1349 ! ### Communicate messages in the left-right and up-down directions.
1351 integer flag ! = 99: replace the boundary (land model only).
1352 ! = 1: sum the boundary values.
1354 real in_out_data(nx,ny)
1356 in_out_data = data + 0.0
1357 call MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
1358 call MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
1359 data = in_out_data + 0
1362 end subroutine MPP_LAND_COM_INTEGER
1365 subroutine MPP_LAND_COM_INTEGER8(data,NX,NY,flag)
1366 ! ### Communicate messages in the left-right and up-down directions.
1368 integer flag ! = 99: replace the boundary (land model only).
1369 ! = 1: sum the boundary values.
1370 integer(kind=int64) data(nx,ny)
1371 real in_out_data(nx,ny)
1373 in_out_data = data + 0.0
1374 call MPP_LAND_LR_COM(in_out_data,NX,NY,flag)
1375 call MPP_LAND_UB_COM(in_out_data,NX,NY,flag)
1376 data = in_out_data + 0
1379 end subroutine MPP_LAND_COM_INTEGER8
1381 subroutine read_restart_3(unit,nz,out)
1383 real buf3(global_nx,global_ny,nz),&
1384 out(local_nx,local_ny,3)
1385 if(my_id.eq.IO_id) read(unit) buf3
1387 call decompose_data_real (buf3(:,:,i),out(:,:,i))
1390 end subroutine read_restart_3
1392 subroutine read_restart_2(unit,out)
1394 real buf2(global_nx,global_ny),&
1395 out(local_nx,local_ny)
1397 if(my_id.eq.IO_id) read(unit,IOSTAT=ierr2) buf2
1398 call mpp_land_bcast_int1(ierr2)
1399 if(ierr2 .ne. 0) return
1401 call decompose_data_real (buf2,out)
1403 end subroutine read_restart_2
1405 subroutine read_restart_rt_2(unit,out)
1407 real buf2(global_rt_nx,global_rt_ny),&
1408 out(local_rt_nx,local_rt_ny)
1410 if(my_id.eq.IO_id) read(unit,IOSTAT=ierr2) buf2
1411 call mpp_land_bcast_int1(ierr2)
1412 if(ierr2.ne.0) return
1414 call decompose_RT_real(buf2,out, &
1415 global_rt_nx,global_rt_ny,local_rt_nx,local_rt_ny)
1417 end subroutine read_restart_rt_2
1419 subroutine read_restart_rt_3(unit,nz,out)
1420 integer unit,nz,i,ierr2
1421 real buf3(global_rt_nx,global_rt_ny,nz),&
1422 out(local_rt_nx,local_rt_ny,3)
1424 if(my_id.eq.IO_id) read(unit,IOSTAT=ierr2) buf3
1425 call mpp_land_bcast_int1(ierr2)
1426 if(ierr2.ne.0) return
1429 call decompose_RT_real (buf3(:,:,i),out(:,:,i),&
1430 global_rt_nx,global_rt_ny,local_rt_nx,local_rt_ny)
1433 end subroutine read_restart_rt_3
1435 subroutine write_restart_3(unit,nz,in)
1437 real buf3(global_nx,global_ny,nz),&
1438 in(local_nx,local_ny,nz)
1440 call write_IO_real(in(:,:,i),buf3(:,:,i))
1442 if(my_id.eq.IO_id) write(unit) buf3
1444 end subroutine write_restart_3
1446 subroutine write_restart_2(unit,in)
1448 real buf2(global_nx,global_ny),&
1449 in(local_nx,local_ny)
1450 call write_IO_real(in,buf2)
1451 if(my_id.eq.IO_id) write(unit) buf2
1453 end subroutine write_restart_2
1455 subroutine write_restart_rt_2(unit,in)
1457 real buf2(global_rt_nx,global_rt_ny), &
1458 in(local_rt_nx,local_rt_ny)
1459 call write_IO_RT_real(in,buf2)
1460 if(my_id.eq.IO_id) write(unit) buf2
1462 end subroutine write_restart_rt_2
1464 subroutine write_restart_rt_3(unit,nz,in)
1466 real buf3(global_rt_nx,global_rt_ny,nz),&
1467 in(local_rt_nx,local_rt_ny,nz)
1469 call write_IO_RT_real(in(:,:,i),buf3(:,:,i))
1471 if(my_id.eq.IO_id) write(unit) buf3
1473 end subroutine write_restart_rt_3
1475 subroutine decompose_RT_real (in_buff,out_buff,g_nx,g_ny,nx,ny)
1476 ! usage: all processes call this subroutine.
1477 ! the IO node distributes the data to the rest of the processes.
1478 integer g_nx,g_ny,nx,ny
1479 real,intent(in),dimension(:,:) :: in_buff
1480 real,intent(out),dimension(:,:) :: out_buff
1481 integer tag, i, status, ierr,size
1482 integer ibegin,iend,jbegin,jend
1485 if(my_id .eq. IO_id) then
1486 do i = 0, numprocs - 1
1487 ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1488 if(ibegin.gt.1) ibegin=ibegin - 1
1489 iend = ibegin + local_rt_nx_size(i+1) -1
1490 jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1491 if(jbegin.gt.1) jbegin=jbegin - 1
1492 jend = jbegin + local_rt_ny_size(i+1) -1
1494 if(my_id .eq. i) then
1495 out_buff=in_buff(ibegin:iend,jbegin:jend)
1497 ! send data to the rest process.
1498 size = (iend-ibegin+1)*(jend-jbegin+1)
1499 call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,&
1500 MPI_REAL, i,tag,HYDRO_COMM_WORLD,ierr)
1505 call mpi_recv(out_buff,size,MPI_REAL,IO_id, &
1506 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1509 end subroutine decompose_RT_real
1511 subroutine decompose_RT_int (in_buff,out_buff,g_nx,g_ny,nx,ny)
1512 ! usage: all processes call this subroutine.
1513 ! the IO node distributes the data to the rest of the processes.
1514 integer g_nx,g_ny,nx,ny
1515 integer,intent(in),dimension(:,:) :: in_buff
1516 integer,intent(out),dimension(:,:) :: out_buff
1517 integer tag, i, status, ierr,size
1518 integer ibegin,iend,jbegin,jend
1521 if(my_id .eq. IO_id) then
1522 do i = 0, numprocs - 1
1523 ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1524 if(ibegin.gt.1) ibegin=ibegin - 1
1525 iend = ibegin + local_rt_nx_size(i+1) -1
1526 jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1527 if(jbegin.gt.1) jbegin=jbegin - 1
1528 jend = jbegin + local_rt_ny_size(i+1) -1
1530 if(my_id .eq. i) then
1531 out_buff=in_buff(ibegin:iend,jbegin:jend)
1533 ! send data to the rest process.
1534 size = (iend-ibegin+1)*(jend-jbegin+1)
1535 call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,&
1536 MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr)
1541 call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, &
1542 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1545 end subroutine decompose_RT_int
1547 subroutine decompose_RT_int8 (in_buff,out_buff,g_nx,g_ny,nx,ny)
1548 ! usage: all processes call this subroutine.
1549 ! the IO node distributes the data to the rest of the processes.
1550 integer g_nx,g_ny,nx,ny
1551 integer(kind=int64),intent(in),dimension(:,:) :: in_buff
1552 integer(kind=int64),intent(out),dimension(:,:) :: out_buff
1553 integer tag, i, status, ierr,size
1554 integer ibegin,iend,jbegin,jend
1557 if(my_id == IO_id) then
1558 do i = 0, numprocs - 1
1559 ibegin = startx(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1560 if(ibegin > 1) ibegin=ibegin - 1
1561 iend = ibegin + local_rt_nx_size(i+1) -1
1562 jbegin = starty(i+1)*rt_AGGFACTRT - (rt_AGGFACTRT-1)
1563 if(jbegin > 1) jbegin=jbegin - 1
1564 jend = jbegin + local_rt_ny_size(i+1) -1
1567 out_buff=in_buff(ibegin:iend,jbegin:jend)
1569 ! send data to the rest process.
1570 size = (iend-ibegin+1)*(jend-jbegin+1)
1571 call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,&
1572 MPI_INTEGER8, i,tag,HYDRO_COMM_WORLD,ierr)
1577 call mpi_recv(out_buff,size,MPI_INTEGER8,IO_id, &
1578 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1581 end subroutine decompose_RT_int8
1583 subroutine getNX_NY(nprocs,nx,ny)
1584 ! calculate the nx and ny based on the total nprocs.
1585 integer nprocs, nx, ny, n
1588 n = global_nx * global_ny
1589 if( nprocs .ge. n ) then
1590 call fatal_error_stop("Error: number of processes greater than number of cells in the land grid")
1595 if( mod(nprocs,j) .eq. 0 ) then
1597 if( i .le. global_nx ) then
1598 if( abs(i-j) .lt. max) then
1599 if( j .le. global_ny ) then
1609 end subroutine getNX_NY
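! Example: for nprocs = 12 on a sufficiently large grid the factor pairs are
! (1,12), (2,6), (3,4), (4,3), (6,2) and (12,1); the search keeps the admissible
! pair with the smallest |i-j|, so the process grid comes out roughly square
! (3x4 or 4x3, depending on how the elided loop bodies assign the two factors).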
1611 subroutine pack_global_22(in, &
1614 real out(global_nx,global_ny,k)
1615 real in(local_nx,local_ny,k)
1617 call write_IO_real(in(:,:,i),out(:,:,i))
1620 end subroutine pack_global_22
1623 subroutine wrf_LAND_set_INIT(info,total_pe,AGGFACTRT)
1626 integer info(9,total_pe),AGGFACTRT
1627 integer :: ierr, status
1630 call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
1631 call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr )
1633 if(numprocs .ne. total_pe) then
1634 write(6,*) "FATAL ERROR: In wrf_LAND_set_INIT() - numprocs .ne. total_pe ",numprocs, total_pe
1635 call mpp_land_abort()
1639 ! ### get the neighbors. -1 means no neighbor.
1640 left_id = info(2,my_id+1)
1641 right_id = info(3,my_id+1)
1642 up_id = info(4,my_id+1)
1643 down_id = info(5,my_id+1)
1646 allocate(local_nx_size(numprocs),stat = status)
1647 allocate(local_ny_size(numprocs),stat = status)
1648 allocate(local_rt_nx_size(numprocs),stat = status)
1649 allocate(local_rt_ny_size(numprocs),stat = status)
1650 allocate(starty(numprocs),stat = ierr)
1651 allocate(startx(numprocs),stat = ierr)
1654 local_nx = info(7,i) - info(6,i) + 1
1655 local_ny = info(9,i) - info(8,i) + 1
1660 global_nx = max(global_nx,info(7,i))
1661 global_ny = max(global_ny,info(9,i))
1664 local_rt_nx = local_nx*AGGFACTRT+2
1665 local_rt_ny = local_ny*AGGFACTRT+2
1666 if(left_id.lt.0) local_rt_nx = local_rt_nx -1
1667 if(right_id.lt.0) local_rt_nx = local_rt_nx -1
1668 if(up_id.lt.0) local_rt_ny = local_rt_ny -1
1669 if(down_id.lt.0) local_rt_ny = local_rt_ny -1
1671 global_rt_nx = global_nx*AGGFACTRT
1672 global_rt_ny = global_ny*AGGFACTRT
1673 rt_AGGFACTRT = AGGFACTRT
1676 local_nx_size(i) = info(7,i) - info(6,i) + 1
1677 local_ny_size(i) = info(9,i) - info(8,i) + 1
1678 startx(i) = info(6,i)
1679 starty(i) = info(8,i)
1681 local_rt_nx_size(i) = (info(7,i) - info(6,i) + 1)*AGGFACTRT+2
1682 local_rt_ny_size(i) = (info(9,i) - info(8,i) + 1 )*AGGFACTRT+2
1683 if(info(2,i).lt.0) local_rt_nx_size(i) = local_rt_nx_size(i) -1
1684 if(info(3,i).lt.0) local_rt_nx_size(i) = local_rt_nx_size(i) -1
1685 if(info(4,i).lt.0) local_rt_ny_size(i) = local_rt_ny_size(i) -1
1686 if(info(5,i).lt.0) local_rt_ny_size(i) = local_rt_ny_size(i) -1
1688 call calculate_offset_vectors()
1691 end subroutine wrf_LAND_set_INIT
1693 subroutine getMy_global_id()
1695 call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr )
1697 end subroutine getMy_global_id
1699 subroutine MPP_CHANNEL_COM_REAL(Link_location,ix,jy,Link_V,size,flag)
1700 ! communicate the data for the channel routing.
1703 integer(kind=int64) Link_location(ix,jy)
1705 real Link_V(size), tmp_inout(ix,jy)
1709 if(size .eq. 0) then
1713 ! map the Link_V data to tmp_inout(ix,jy)
1715 if(Link_location(i,1) .gt. 0) &
1716 tmp_inout(i,1) = Link_V(Link_location(i,1))
1717 if(Link_location(i,2) .gt. 0) &
1718 tmp_inout(i,2) = Link_V(Link_location(i,2))
1719 if(Link_location(i,jy-1) .gt. 0) &
1720 tmp_inout(i,jy-1) = Link_V(Link_location(i,jy-1))
1721 if(Link_location(i,jy) .gt. 0) &
1722 tmp_inout(i,jy) = Link_V(Link_location(i,jy))
1725 if(Link_location(1,j) .gt. 0) &
1726 tmp_inout(1,j) = Link_V(Link_location(1,j))
1727 if(Link_location(2,j) .gt. 0) &
1728 tmp_inout(2,j) = Link_V(Link_location(2,j))
1729 if(Link_location(ix-1,j) .gt. 0) &
1730 tmp_inout(ix-1,j) = Link_V(Link_location(ix-1,j))
1731 if(Link_location(ix,j) .gt. 0) &
1732 tmp_inout(ix,j) = Link_V(Link_location(ix,j))
1736 ! communicate tmp_inout
1737 call MPP_LAND_COM_REAL(tmp_inout, ix,jy,flag)
1739 ! map the data back to Link_V
1740 if(size .eq. 0) return
1742 if( (Link_location(1,j) .gt. 0) .and. (tmp_inout(1,j) .ne. -999) ) &
1743 Link_V(Link_location(1,j)) = tmp_inout(1,j)
1744 if((Link_location(2,j) .gt. 0) .and. (tmp_inout(2,j) .ne. -999) ) &
1745 Link_V(Link_location(2,j)) = tmp_inout(2,j)
1746 if((Link_location(ix-1,j) .gt. 0) .and. (tmp_inout(ix-1,j) .ne. -999)) &
1747 Link_V(Link_location(ix-1,j)) = tmp_inout(ix-1,j)
1748 if((Link_location(ix,j) .gt. 0) .and. (tmp_inout(ix,j) .ne. -999) )&
1749 Link_V(Link_location(ix,j)) = tmp_inout(ix,j)
1752 if((Link_location(i,1) .gt. 0) .and. (tmp_inout(i,1) .ne. -999) )&
1753 Link_V(Link_location(i,1)) = tmp_inout(i,1)
1754 if( (Link_location(i,2) .gt. 0) .and. (tmp_inout(i,2) .ne. -999) )&
1755 Link_V(Link_location(i,2)) = tmp_inout(i,2)
1756 if((Link_location(i,jy-1) .gt. 0) .and. (tmp_inout(i,jy-1) .ne. -999) ) &
1757 Link_V(Link_location(i,jy-1)) = tmp_inout(i,jy-1)
1758 if((Link_location(i,jy) .gt. 0) .and. (tmp_inout(i,jy) .ne. -999) ) &
1759 Link_V(Link_location(i,jy)) = tmp_inout(i,jy)
1761 end subroutine MPP_CHANNEL_COM_REAL
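! Note: the channel exchange above reuses the 2-D halo machinery by scattering
! the 1-D link values into the first and last two rows/columns of a scratch
! array, halo-exchanging that array with MPP_LAND_COM_REAL, and copying the
! updated boundary cells back into Link_V (cells still holding the -999 fill
! value are skipped).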
1764 subroutine MPP_CHANNEL_COM_REAL8(Link_location,ix,jy,Link_V,size,flag)
1765 ! communicate the data for the channel routing.
1768 integer(kind=int64) Link_location(ix,jy)
1770 real*8 :: Link_V(size), tmp_inout(ix,jy)
1774 if(size .eq. 0) then
1778 ! map the Link_V data to tmp_inout(ix,jy)
1780 if(Link_location(i,1) .gt. 0) &
1781 tmp_inout(i,1) = Link_V(Link_location(i,1))
1782 if(Link_location(i,2) .gt. 0) &
1783 tmp_inout(i,2) = Link_V(Link_location(i,2))
1784 if(Link_location(i,jy-1) .gt. 0) &
1785 tmp_inout(i,jy-1) = Link_V(Link_location(i,jy-1))
1786 if(Link_location(i,jy) .gt. 0) &
1787 tmp_inout(i,jy) = Link_V(Link_location(i,jy))
1790 if(Link_location(1,j) .gt. 0) &
1791 tmp_inout(1,j) = Link_V(Link_location(1,j))
1792 if(Link_location(2,j) .gt. 0) &
1793 tmp_inout(2,j) = Link_V(Link_location(2,j))
1794 if(Link_location(ix-1,j) .gt. 0) &
1795 tmp_inout(ix-1,j) = Link_V(Link_location(ix-1,j))
1796 if(Link_location(ix,j) .gt. 0) &
1797 tmp_inout(ix,j) = Link_V(Link_location(ix,j))
1801 ! communicate tmp_inout
1802 call MPP_LAND_COM_REAL8(tmp_inout, ix,jy,flag)
1804 ! map the data back to Link_V
1805 if(size .eq. 0) return
1807 if( (Link_location(1,j) .gt. 0) .and. (tmp_inout(1,j) .ne. -999) ) &
1808 Link_V(Link_location(1,j)) = tmp_inout(1,j)
1809 if((Link_location(2,j) .gt. 0) .and. (tmp_inout(2,j) .ne. -999) ) &
1810 Link_V(Link_location(2,j)) = tmp_inout(2,j)
1811 if((Link_location(ix-1,j) .gt. 0) .and. (tmp_inout(ix-1,j) .ne. -999)) &
1812 Link_V(Link_location(ix-1,j)) = tmp_inout(ix-1,j)
1813 if((Link_location(ix,j) .gt. 0) .and. (tmp_inout(ix,j) .ne. -999) )&
1814 Link_V(Link_location(ix,j)) = tmp_inout(ix,j)
1817 if((Link_location(i,1) .gt. 0) .and. (tmp_inout(i,1) .ne. -999) )&
1818 Link_V(Link_location(i,1)) = tmp_inout(i,1)
1819 if( (Link_location(i,2) .gt. 0) .and. (tmp_inout(i,2) .ne. -999) )&
1820 Link_V(Link_location(i,2)) = tmp_inout(i,2)
1821 if((Link_location(i,jy-1) .gt. 0) .and. (tmp_inout(i,jy-1) .ne. -999) ) &
1822 Link_V(Link_location(i,jy-1)) = tmp_inout(i,jy-1)
1823 if((Link_location(i,jy) .gt. 0) .and. (tmp_inout(i,jy) .ne. -999) ) &
1824 Link_V(Link_location(i,jy)) = tmp_inout(i,jy)
1826 end subroutine MPP_CHANNEL_COM_REAL8
1828 subroutine MPP_CHANNEL_COM_INT(Link_location,ix,jy,Link_V,size,flag)
1829 ! communicate the data for the channel routing.
1832 integer(kind=int64) Link_location(ix,jy)
1834 integer(kind=int64) Link_V(size), tmp_inout(ix,jy)
1836 if(size .eq. 0) then
1840 ! map the Link_V data to tmp_inout(ix,jy)
1842 if(Link_location(i,1) .gt. 0) &
1843 tmp_inout(i,1) = Link_V(Link_location(i,1))
1844 if(Link_location(i,2) .gt. 0) &
1845 tmp_inout(i,2) = Link_V(Link_location(i,2))
1846 if(Link_location(i,jy-1) .gt. 0) &
1847 tmp_inout(i,jy-1) = Link_V(Link_location(i,jy-1))
1848 if(Link_location(i,jy) .gt. 0) &
1849 tmp_inout(i,jy) = Link_V(Link_location(i,jy))
1852 if(Link_location(1,j) .gt. 0) &
1853 tmp_inout(1,j) = Link_V(Link_location(1,j))
1854 if(Link_location(2,j) .gt. 0) &
1855 tmp_inout(2,j) = Link_V(Link_location(2,j))
1856 if(Link_location(ix-1,j) .gt. 0) &
1857 tmp_inout(ix-1,j) = Link_V(Link_location(ix-1,j))
1858 if(Link_location(ix,j) .gt. 0) &
1859 tmp_inout(ix,j) = Link_V(Link_location(ix,j))
1865 ! communicate tmp_inout
1866 call MPP_LAND_COM_INTEGER8(tmp_inout, ix,jy,flag)
1868 ! map the data back to Link_V
1869 if(size .eq. 0) return
1871 if( (Link_location(1,j) .gt. 0) .and. (tmp_inout(1,j) .ne. -999) ) &
1872 Link_V(Link_location(1,j)) = tmp_inout(1,j)
1873 if((Link_location(2,j) .gt. 0) .and. (tmp_inout(2,j) .ne. -999) ) &
1874 Link_V(Link_location(2,j)) = tmp_inout(2,j)
1875 if((Link_location(ix-1,j) .gt. 0) .and. (tmp_inout(ix-1,j) .ne. -999)) &
1876 Link_V(Link_location(ix-1,j)) = tmp_inout(ix-1,j)
1877 if((Link_location(ix,j) .gt. 0) .and. (tmp_inout(ix,j) .ne. -999) )&
1878 Link_V(Link_location(ix,j)) = tmp_inout(ix,j)
1881 if((Link_location(i,1) .gt. 0) .and. (tmp_inout(i,1) .ne. -999) )&
1882 Link_V(Link_location(i,1)) = tmp_inout(i,1)
1883 if( (Link_location(i,2) .gt. 0) .and. (tmp_inout(i,2) .ne. -999) )&
1884 Link_V(Link_location(i,2)) = tmp_inout(i,2)
1885 if((Link_location(i,jy-1) .gt. 0) .and. (tmp_inout(i,jy-1) .ne. -999) ) &
1886 Link_V(Link_location(i,jy-1)) = tmp_inout(i,jy-1)
1887 if((Link_location(i,jy) .gt. 0) .and. (tmp_inout(i,jy) .ne. -999) ) &
1888 Link_V(Link_location(i,jy)) = tmp_inout(i,jy)
1890 end subroutine MPP_CHANNEL_COM_INT
1893 subroutine print_2(unit,in,fm)
1896 real buf2(global_nx,global_ny),&
1897 in(local_nx,local_ny)
1898 call write_IO_real(in,buf2)
1899 if(my_id.eq.IO_id) write(unit,*) buf2
1901 end subroutine print_2
1903 subroutine print_rt_2(unit,in)
1905 real buf2(global_nx,global_ny),&
1906 in(local_nx,local_ny)
1907 call write_IO_real(in,buf2)
1908 if(my_id.eq.IO_id) write(unit,*) buf2
1910 end subroutine print_rt_2
1912 subroutine mpp_land_max_int1(v)
1915 integer i, ierr, tag
1916 if(my_id .eq. IO_id) then
1918 do i = 0, numprocs - 1
1919 if(i .ne. my_id) then
1920 !block receive from other node.
1922 call mpi_recv(r1,1,MPI_INTEGER,i, &
1923 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1924 if(max <= r1) max = r1
1929 call mpi_send(v,1,MPI_INTEGER, IO_id, &
1930 tag,HYDRO_COMM_WORLD,ierr)
1932 call mpp_land_bcast_int1(max)
1935 end subroutine mpp_land_max_int1
1937 subroutine mpp_land_max_real1(v)
1940 integer i, ierr, tag
1941 if(my_id .eq. IO_id) then
1943 do i = 0, numprocs - 1
1944 if(i .ne. my_id) then
1945 !block receive from other node.
1947 call mpi_recv(r1,1,MPI_REAL,i, &
1948 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1949 if(max <= r1) max = r1
1954 call mpi_send(v,1,MPI_REAL, IO_id, &
1955 tag,HYDRO_COMM_WORLD,ierr)
1957 call mpp_land_bcast_real1(max)
1960 end subroutine mpp_land_max_real1
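! Note: the two max routines above funnel every rank's value through the IO
! rank and then broadcast the result; a single collective such as
!   call MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_INTEGER, MPI_MAX, HYDRO_COMM_WORLD, ierr)
! (shown only as an illustrative alternative for the integer case) has the same
! net effect.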
1962 subroutine mpp_same_int1(v)
1965 integer i, ierr, tag
1966 if(my_id .eq. IO_id) then
1967 do i = 0, numprocs - 1
1968 if(i .ne. my_id) then
1969 !block receive from other node.
1971 call mpi_recv(r1,1,MPI_INTEGER,i, &
1972 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
1973 if(v .ne. r1) v = -99
1978 call mpi_send(v,1,MPI_INTEGER, IO_id, &
1979 tag,HYDRO_COMM_WORLD,ierr)
1981 call mpp_land_bcast_int1(v)
1982 end subroutine mpp_same_int1
1986 subroutine write_chanel_real(v,map_l2g,gnlinks,nlinks,g_v)
1988 integer gnlinks,nlinks, map_l2g(nlinks)
1989 real recv(nlinks), v(nlinks)
1990 ! real g_v(gnlinks), tmp_v(gnlinks)
1991 integer i, ierr, tag, k
1992 integer length, node, message_len
1993 integer,allocatable,dimension(:) :: tmp_map
1994 real, allocatable, dimension(:) :: tmp_v
1995 real, dimension(:) :: g_v
1997 if(my_id .eq. io_id) then
1998 allocate(tmp_map(gnlinks))
1999 allocate(tmp_v(gnlinks))
2000 if(nlinks .le. 0) then
2003 tmp_map(1:nlinks) = map_l2g(1:nlinks)
2006 allocate(tmp_map(1))
2010 if(my_id .eq. IO_id) then
2011 do i = 0, numprocs - 1
2012 message_len = mpp_nlinks(i+1)
2013 if(i .ne. my_id) then
2014 !block receive from other node.
2017 call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, &
2018 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2021 call mpi_recv(tmp_v(1:message_len),message_len,MPI_REAL,i, &
2022 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2024 do k = 1,message_len
2026 if(node .gt. 0) then
2027 g_v(node) = tmp_v(k)
2030 write(6,*) "Mapping info: k=",k," node=", node
2037 if(node .gt. 0) then
2041 write(6,*) "local mapping info: k=",k," node=",node
2050 call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, &
2051 tag,HYDRO_COMM_WORLD,ierr)
2053 call mpi_send(v,nlinks,MPI_REAL,IO_id, &
2054 tag,HYDRO_COMM_WORLD,ierr)
2057 if(allocated(tmp_map)) deallocate(tmp_map)
2058 if(allocated(tmp_v)) deallocate(tmp_v)
2059 end subroutine write_chanel_real
2061 subroutine write_chanel_int(v,map_l2g,gnlinks,nlinks,g_v)
2063 integer gnlinks,nlinks, map_l2g(nlinks)
2064 integer :: recv(nlinks), v(nlinks)
2065 integer, allocatable, dimension(:) :: tmp_map , tmp_v
2066 integer, dimension(:) :: g_v
2067 integer i, ierr, tag, k
2068 integer length, node, message_len
2070 if(my_id .eq. io_id) then
2071 allocate(tmp_map(gnlinks))
2072 allocate(tmp_v(gnlinks))
2073 if(nlinks .le. 0) then
2076 tmp_map(1:nlinks) = map_l2g(1:nlinks)
2079 allocate(tmp_map(1))
2084 if(my_id .eq. IO_id) then
2085 do i = 0, numprocs - 1
2086 message_len = mpp_nlinks(i+1)
2087 if(i .ne. my_id) then
2088 !blocking receive from another process.
2091 call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, &
2092 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2095 call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER,i, &
2096 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2098 do k = 1,message_len
2099 if(tmp_map(k) .gt. 0) then
2101 g_v(node) = tmp_v(k)
2104 write(6,*) "Mapping info: k=",k," node=",tmp_v(k)
2110 if(map_l2g(k) .gt. 0) then
2115 write(6,*) "Mapping info: k=",k," node=",map_l2g(k)
2124 call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, &
2125 tag,HYDRO_COMM_WORLD,ierr)
2127 call mpi_send(v,nlinks,MPI_INTEGER,IO_id, &
2128 tag,HYDRO_COMM_WORLD,ierr)
2130 if(allocated(tmp_map)) deallocate(tmp_map)
2131 if(allocated(tmp_v)) deallocate(tmp_v)
2132 end subroutine write_chanel_int
2134 subroutine write_chanel_int8(v,map_l2g,gnlinks,nlinks,g_v)
2136 integer gnlinks,nlinks, map_l2g(nlinks)
2137 integer(kind=int64) :: recv(nlinks), v(nlinks)
2138 integer(kind=int64), allocatable, dimension(:) :: tmp_v
2139 integer, allocatable, dimension(:) :: tmp_map
2140 integer(kind=int64), dimension(:) :: g_v
2141 integer i, ierr, tag, k
2142 integer length, node, message_len
2144 if(my_id .eq. io_id) then
2145 allocate(tmp_map(gnlinks))
2146 allocate(tmp_v(gnlinks))
2147 if(nlinks .le. 0) then
2150 tmp_map(1:nlinks) = map_l2g(1:nlinks)
2153 allocate(tmp_map(1))
2158 if(my_id .eq. IO_id) then
2159 do i = 0, numprocs - 1
2160 message_len = mpp_nlinks(i+1)
2161 if(i .ne. my_id) then
2162 !blocking receive from another process.
2165 call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, &
2166 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2168 call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER8,i, &
2169 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2171 do k = 1,message_len
2172 if(tmp_map(k) .gt. 0) then
2174 g_v(node) = tmp_v(k)
2177 write(6,*) "Mapping info: k=",k," node=",tmp_v(k)
2183 if(map_l2g(k) .gt. 0) then
2188 write(6,*) "Mapping info: k=",k," node=",map_l2g(k)
2197 call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, &
2198 tag,HYDRO_COMM_WORLD,ierr)
2200 call mpi_send(v,nlinks,MPI_INTEGER8,IO_id, &
2201 tag,HYDRO_COMM_WORLD,ierr)
2203 if(allocated(tmp_map)) deallocate(tmp_map)
2204 if(allocated(tmp_v)) deallocate(tmp_v)
2205 end subroutine write_chanel_int8
2208 subroutine write_lake_real(v,nodelist_in,nlakes)
2210 real recv(nlakes), v(nlakes)
2211 integer nodelist(nlakes), nlakes, nodelist_in(nlakes)
2212 integer i, ierr, tag, k
2213 integer length, node
2215 nodelist = nodelist_in
2216 if(my_id .eq. IO_id) then
2217 do i = 0, numprocs - 1
2218 if(i .ne. my_id) then
2219 !blocking receive from another process.
2221 call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, &
2222 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2224 call mpi_recv(recv(:),nlakes,MPI_REAL,i, &
2225 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2228 if(nodelist(k) .gt. -99) then
2230 v(node) = recv(node)
2237 call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, &
2238 tag,HYDRO_COMM_WORLD,ierr)
2240 call mpi_send(v,nlakes,MPI_REAL,IO_id, &
2241 tag,HYDRO_COMM_WORLD,ierr)
2243 end subroutine write_lake_real
2246 subroutine write_lake_char(v,nodelist_in,nlakes)
2248 character(len=256) recv(nlakes), v(nlakes)
2249 integer nodelist(nlakes), nlakes, nodelist_in(nlakes)
2250 integer i, ierr, tag, k, in_len
2251 integer length, node
2253 in_len = size(v, 1)*len(v)
2255 nodelist = nodelist_in
2256 if(my_id .eq. IO_id) then
2257 do i = 0, numprocs - 1
2258 if(i .ne. my_id) then
2259 !blocking receive from another process.
2261 call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, &
2262 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2264 call mpi_recv(recv(:),in_len,MPI_CHARACTER,i, &
2265 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2268 if(nodelist(k) .gt. -99) then
2270 v(node) = recv(node)
2277 call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, &
2278 tag,HYDRO_COMM_WORLD,ierr)
2280 call mpi_send(v,in_len,MPI_CHARACTER,IO_id, &
2281 tag,HYDRO_COMM_WORLD,ierr)
2283 end subroutine write_lake_char
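!
! Usage sketch (illustrative only; "lake_elev" and "lake_node_list" are
! hypothetical caller names): merge lake values owned by different
! processes into the IO process's copy before output.
!    call write_lake_real(lake_elev, lake_node_list, nlakes)
!    if(my_id .eq. IO_id) write(6,*) lake_elev
!    ! write_lake_char provides the same merge for character(len=256) fields
!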
2286 subroutine read_rst_crt_r(unit,out,size)
2288 integer unit, size, ierr,ierr2
2289 real out(size),out1(size)
2290 if(my_id.eq.IO_id) then
2291 read(unit,IOSTAT=ierr2,end=99) out1
2292 if(ierr2.eq.0) out=out1
2295 call mpp_land_bcast_int1(ierr2)
2296 if(ierr2 .ne. 0) return
2297 call mpi_bcast(out,size,MPI_REAL, &
2298 IO_id,HYDRO_COMM_WORLD,ierr)
2300 end subroutine read_rst_crt_r
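!
! Usage sketch (illustrative only; "qlink_rst" and unit 87 are hypothetical):
! the IO process reads one record from an already-open restart unit and the
! result (or a silent skip on a read error) is replicated to all processes.
!    call read_rst_crt_r(87, qlink_rst, gnlinks)
!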
2302 subroutine write_rst_crt_r(unit,cd,map_l2g,gnlinks,nlinks)
2303 integer :: unit,gnlinks,nlinks,map_l2g(nlinks)
2306 call write_chanel_real(cd,map_l2g,gnlinks,nlinks, g_cd)
2309 end subroutine write_rst_crt_r
2311 subroutine sum_int1d(vin,nsize)
2313 integer nsize,i,j,tag,ierr
2314 integer, dimension(nsize):: vin,recv
2316 if(nsize .le. 0) return
2317 if(my_id .eq. IO_id) then
2318 do i = 0, numprocs - 1
2319 if(i .ne. my_id) then
2320 call mpi_recv(recv,nsize,MPI_INTEGER,i, &
2321 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2322 vin(:) = vin(:) + recv(:)
2326 call mpi_send(vin,nsize,MPI_INTEGER,IO_id, &
2327 tag,HYDRO_COMM_WORLD,ierr)
2329 call mpp_land_bcast_int1d(vin)
2331 end subroutine sum_int1d
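!
! Usage sketch (illustrative only; "counts" and "ncat" are hypothetical
! caller names): element-wise sum of an integer vector over all processes;
! every process receives the summed result.
!    call sum_int1d(counts, ncat)
!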
2333 subroutine combine_int1d(vin,nsize, flag)
2335 integer nsize,i,j,tag,ierr, flag, k
2336 integer, dimension(nsize):: vin,recv
2338 if(nsize .le. 0) return
2339 if(my_id .eq. IO_id) then
2340 do i = 0, numprocs - 1
2341 if(i .ne. my_id) then
2342 call mpi_recv(recv,nsize,MPI_INTEGER,i, &
2343 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2345 if(recv(k) .ne. flag) then
2352 call mpi_send(vin,nsize,MPI_INTEGER,IO_id, &
2353 tag,HYDRO_COMM_WORLD,ierr)
2355 call mpp_land_bcast_int1d(vin)
2357 end subroutine combine_int1d
2359 subroutine combine_int8_1d(vin,nsize, flag)
2361 integer nsize,i,j,tag,ierr, flag, k
2362 integer(kind=int64), dimension(nsize):: vin,recv
2364 if(nsize .le. 0) return
2365 if(my_id .eq. IO_id) then
2366 do i = 0, numprocs - 1
2367 if(i .ne. my_id) then
2368 call mpi_recv(recv,nsize,MPI_INTEGER8,i, &
2369 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2371 if(recv(k) .ne. flag) then
2378 call mpi_send(vin,nsize,MPI_INTEGER8,IO_id, &
2379 tag,HYDRO_COMM_WORLD,ierr)
2381 call mpp_land_bcast_int8_1d(vin)
2383 end subroutine combine_int8_1d
2385 subroutine sum_real1d(vin,nsize)
2388 real,dimension(nsize) :: vin
2389 real*8,dimension(nsize) :: vin8
2391 call sum_real8(vin8,nsize)
2393 end subroutine sum_real1d
2395 subroutine sum_real8(vin,nsize)
2397 integer nsize,i,j,tag,ierr
2398 real*8, dimension(nsize):: vin,recv
2399 real, dimension(nsize):: v
2401 if(my_id .eq. IO_id) then
2402 do i = 0, numprocs - 1
2403 if(i .ne. my_id) then
2404 call mpi_recv(recv,nsize,MPI_DOUBLE_PRECISION,i, &
2405 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2406 vin(:) = vin(:) + recv(:)
2411 call mpi_send(vin,nsize,MPI_DOUBLE_PRECISION,IO_id, &
2412 tag,HYDRO_COMM_WORLD,ierr)
2414 call mpp_land_bcast_real(nsize,v)
2417 end subroutine sum_real8
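!
! Usage sketch (illustrative only; "basin_vol" and "nbasins" are hypothetical
! caller names): sum a single-precision vector across processes; the
! accumulation is carried out in double precision (sum_real8) to limit
! round-off.
!    call sum_real1d(basin_vol, nbasins)
!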
2419 ! subroutine get_globalDim(ix,g_ix)
2421 ! integer ix,g_ix, ierr
2423 ! if ( my_id .eq. IO_id ) then
2425 ! call mpi_reduce( MPI_IN_PLACE, g_ix, 4, MPI_INTEGER, &
2426 ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr )
2428 ! call mpi_reduce( ix, 0, 4, MPI_INTEGER, &
2429 ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr )
2431 ! call mpp_land_bcast_int1(g_ix)
2435 ! end subroutine get_globalDim
2437 subroutine gather_1d_real_tmp(vl,s_in,e_in,vg,sg)
2438 integer sg, s,e, size, s_in, e_in
2441 ! s: start index, e: end index
2442 real vl(e_in-s_in+1), vg(sg)
2446 if(my_id .eq. IO_id) then
2454 if(my_id .eq. IO_id) then
2455 do i = 0, numprocs - 1
2456 if(i .ne. my_id) then
2457 !blocking receive from another process.
2459 call mpi_recv(index_s,2,MPI_INTEGER,i, &
2460 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2466 call mpi_recv(vg(s:e),size,MPI_REAL, &
2467 i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2472 call mpi_send(index_s,2,MPI_INTEGER, IO_id, &
2473 tag,HYDRO_COMM_WORLD,ierr)
2476 call mpi_send(vl,size,MPI_REAL,IO_id, &
2477 tag,HYDRO_COMM_WORLD,ierr)
2481 end subroutine gather_1d_real_tmp
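!
! Usage sketch (illustrative only; names are hypothetical): each process
! contributes the slice [s_in:e_in] of a distributed 1-d field and the IO
! process assembles the full vector of length sg.
!    call gather_1d_real_tmp(v_local, s_in, e_in, v_global, sg)
!    if(my_id .eq. IO_id) then
!       ! v_global is complete here
!    end if
!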
2483 subroutine sum_real1(inout)
2488 CALL MPI_ALLREDUCE(send,inout,1,MPI_REAL,MPI_SUM,HYDRO_COMM_WORLD,ierr)
2489 end subroutine sum_real1
2491 subroutine sum_double(inout)
2493 real*8:: inout, send
2496 !yw CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE,MPI_SUM,HYDRO_COMM_WORLD,ierr)
2497 CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE_PRECISION,MPI_SUM,HYDRO_COMM_WORLD,ierr)
2498 end subroutine sum_double
2500 subroutine mpp_chrt_nlinks_collect(nlinks)
2501 ! gather each process's nlinks count into mpp_nlinks on the IO process
2504 integer :: i, ierr, status, tag
2505 allocate(mpp_nlinks(numprocs),stat = status)
2508 if(my_id .eq. IO_id) then
2509 do i = 0,numprocs -1
2510 if(i .ne. my_id) then
2511 call mpi_recv(mpp_nlinks(i+1),1,MPI_INTEGER,i, &
2512 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2518 call mpi_send(nlinks,1,MPI_INTEGER, IO_id, &
2519 tag,HYDRO_COMM_WORLD,ierr)
2523 end subroutine mpp_chrt_nlinks_collect
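!
! Usage sketch (illustrative only): called once after the channel network is
! decomposed so the IO process knows how many links each process owns; the
! counts are later used as message lengths in the write_chanel_* routines.
!    call mpp_chrt_nlinks_collect(nlinks)
!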
2525 subroutine getLocalXY(ix,jx,startx,starty,endx,endy)
2526 !!! this is for NoahMP only
2528 integer:: ix,jx,startx,starty,endx,endy
2529 startx = local_startx
2530 starty = local_starty
2531 endx = startx + ix -1
2532 endy = starty + jx -1
2533 end subroutine getLocalXY
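!
! Usage sketch (illustrative only; ix/jx are the local NoahMP tile
! dimensions, the other names are hypothetical caller variables):
!    call getLocalXY(ix, jx, gstartx, gstarty, gendx, gendy)
!    ! (gstartx:gendx, gstarty:gendy) is this process's window in the
!    ! global land grid
!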
2535 subroutine check_landreal1(unit, inVar)
2539 if(my_id .eq. IO_id) then
2543 end subroutine check_landreal1
2545 subroutine check_landreal1d(unit, inVar)
2549 if(my_id .eq. IO_id) then
2553 end subroutine check_landreal1d
2554 subroutine check_landreal2d(unit, inVar)
2558 real :: g_var(global_nx,global_ny)
2559 call write_io_real(inVar,g_var)
2560 if(my_id .eq. IO_id) then
2564 end subroutine check_landreal2d
2566 subroutine check_landreal3d(unit, inVar)
2568 integer :: unit, k, klevel
2569 real :: inVar(:,:,:)
2570 real :: g_var(global_nx,global_ny)
2571 klevel = size(inVar,2)
2573 call write_io_real(inVar(:,k,:),g_var)
2574 if(my_id .eq. IO_id) then
2579 end subroutine check_landreal3d
2581 subroutine mpp_collect_1d_int(nlinks,vinout)
2582 ! combine an integer vector of length nlinks across all processes
2583 ! (element-wise sum on the IO process, then broadcast)
2585 integer :: i, ierr, status, tag
2586 integer, dimension(nlinks) :: vinout
2587 integer, dimension(nlinks) :: buf
2589 if(my_id .eq. IO_id) then
2590 do i = 0,numprocs -1
2591 if(i .ne. my_id) then
2592 call mpi_recv(buf,nlinks,MPI_INTEGER,i, &
2593 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2594 vinout = vinout + buf
2598 call mpi_send(vinout,nlinks,MPI_INTEGER, IO_id, &
2599 tag,HYDRO_COMM_WORLD,ierr)
2601 call mpp_land_bcast_int1d(vinout)
2603 end subroutine mpp_collect_1d_int
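!
! Usage sketch (illustrative only; "link_mask" is a hypothetical caller
! array): combine a distributed integer vector whose entries are assumed to
! be set on at most one process; after the call every process holds the
! merged vector.  For very large vectors, mpp_collect_1d_int_mem (below)
! exchanges only the locally set entries.
!    call mpp_collect_1d_int(nlinks, link_mask)
!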
2605 subroutine mpp_collect_1d_int_mem(nlinks,vinout)
2606 ! memory-conscious variant for large vectors: only the locally set
2607 ! (positive) entries are exchanged when combining the length-nlinks vector
2610 integer :: i, ierr, status, tag
2611 integer, dimension(nlinks) :: vinout, tmpIn
2612 integer, dimension(nlinks) :: buf
2613 integer :: lsize, k,m
2614 integer, allocatable, dimension(:) :: tmpBuf
2616 if(my_id .eq. IO_id) then
2617 allocate (tmpBuf(nlinks))
2618 do i = 0,numprocs -1
2619 if(i .ne. my_id) then
2621 call mpi_recv(lsize,1,MPI_INTEGER,i, &
2622 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2623 if(lsize .gt. 0) then
2625 call mpi_recv(tmpBuf(1:lsize),lsize,MPI_INTEGER,i, &
2626 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2634 if(allocated(tmpBuf)) deallocate(tmpBuf)
2638 if(vinout(k) .gt. 0) then
2644 call mpi_send(lsize,1,MPI_INTEGER, IO_id, &
2645 tag,HYDRO_COMM_WORLD,ierr)
2646 if(lsize .gt. 0) then
2648 call mpi_send(tmpIn(1:lsize),lsize,MPI_INTEGER, IO_id, &
2649 tag,HYDRO_COMM_WORLD,ierr)
2652 call mpp_land_bcast_int1d(vinout)
2654 end subroutine mpp_collect_1d_int_mem
2656 subroutine updateLake_seqInt(in,nsize,in0)
2659 integer, dimension(nsize) :: in
2660 integer, dimension(nsize) :: tmp
2661 integer, dimension(:) :: in0
2662 integer tag, i, status, ierr, k
2663 if(nsize .le. 0) return
2666 if(my_id .ne. IO_id) then
2667 call mpi_send(in,nsize,MPI_INTEGER, IO_id, &
2668 tag,HYDRO_COMM_WORLD,ierr)
2670 do i = 0, numprocs - 1
2671 if(i .ne. IO_id) then
2672 call mpi_recv(tmp,nsize,&
2673 MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2675 if(in0(k) .ne. tmp(k)) in(k) = tmp(k)
2680 call mpp_land_bcast_int1d(in)
2682 end subroutine updateLake_seqInt
2684 subroutine updateLake_seqInt8(in,nsize,in0)
2687 integer(kind=int64), dimension(nsize) :: in
2688 integer(kind=int64), dimension(nsize) :: tmp
2689 integer(kind=int64), dimension(:) :: in0
2690 integer tag, i, status, ierr, k
2691 if(nsize .le. 0) return
2694 if(my_id .ne. IO_id) then
2695 call mpi_send(in,nsize,MPI_INTEGER8, IO_id, &
2696 tag,HYDRO_COMM_WORLD,ierr)
2698 do i = 0, numprocs - 1
2699 if(i .ne. IO_id) then
2700 call mpi_recv(tmp,nsize,&
2701 MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2703 if(in0(k) .ne. tmp(k)) in(k) = tmp(k)
2708 call mpp_land_bcast_int8_1d(in)
2710 end subroutine updateLake_seqInt8
2713 subroutine updateLake_seq(in,nsize,in0)
2717 real, dimension(:), intent(inout), asynchronous :: in
2718 real, dimension(:), intent(in) :: in0
2720 real, dimension(nsize) :: tmp
2721 real, dimension(:), allocatable, asynchronous :: prev
2722 real, dimension(:), allocatable :: adjustment
2724 integer tag, i, status, ierr, k
2726 if (nsize .le. 0) return
2728 allocate(adjustment(nsize))
2729 allocate(prev(nsize))
2731 if (my_id == IO_id) prev = in0
2732 call mpi_bcast(prev, nsize, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr)
2734 if (my_id == IO_id) then
2737 adjustment = in - prev
2740 call mpi_allreduce(adjustment, in, nsize, MPI_REAL, MPI_SUM, HYDRO_COMM_WORLD, ierr) ! TODO: check ierr!
2742 deallocate(adjustment)
2745 end subroutine updateLake_seq
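!
! Usage sketch (illustrative only; array names are hypothetical): merge the
! changes each process made to a replicated lake-state vector relative to
! the broadcast reference copy in0, by summing the increments with a single
! MPI_Allreduce.
!    call updateLake_seq(lake_storage, nlakes, lake_storage_prev)
!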
2748 subroutine updateLake_seq_char(in,nsize,in0)
2751 character(len=256), dimension(nsize) :: in
2752 character(len=256), dimension(nsize) :: tmp
2753 character(len=256), dimension(:) :: in0
2754 integer tag, i, status, ierr, k, in_len
2755 if(nsize .le. 0) return
2757 in_len = size(in, 1)*len(in)
2760 if(my_id .ne. IO_id) then
2761 call mpi_send(in,in_len,MPI_CHARACTER, IO_id, &
2762 tag,HYDRO_COMM_WORLD,ierr)
2764 do i = 0, numprocs - 1
2765 if(i .ne. IO_id) then
2766 call mpi_recv(tmp,in_len,&
2767 MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2769 if(in0(k) .ne. tmp(k)) in(k) = tmp(k)
2774 call mpp_land_bcast_char1d(in)
2776 end subroutine updateLake_seq_char
2779 subroutine updateLake_grid(in,nsize,lake_index)
2782 real, dimension(nsize) :: in
2783 integer, dimension(nsize) :: lake_index
2784 real, dimension(nsize) :: tmp
2785 integer tag, i, status, ierr, k
2786 if(nsize .le. 0) return
2788 if(my_id .ne. IO_id) then
2790 call mpi_send(in,nsize,MPI_REAL, IO_id, &
2791 tag,HYDRO_COMM_WORLD,ierr)
2793 call mpi_send(lake_index,nsize,MPI_INTEGER, IO_id, &
2794 tag,HYDRO_COMM_WORLD,ierr)
2796 do i = 0, numprocs - 1
2797 if(i .ne. IO_id) then
2799 call mpi_recv(tmp,nsize,&
2800 MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2802 call mpi_recv(lake_index,nsize,&
2803 MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2805 if(lake_index(k) .gt. 0) in(k) = tmp(k)
2810 call mpp_land_bcast_real_1d(in)
2812 end subroutine updateLake_grid
2815 !subroutine match1dLake:
2816 !   merge the global lake vector: entries identified as the same lake are marked with flag
2817 !   (the default value of vin is 0)
2818 subroutine match1dLake(vin,nsize,flag)
2820 integer nsize,i,j,tag,ierr, flag, k
2821 integer, dimension(nsize):: vin,recv
2823 if(nsize .le. 0) return
2824 if(my_id .eq. IO_id) then
2825 do i = 0, numprocs - 1
2826 if(i .ne. my_id) then
2827 call mpi_recv(recv,nsize,MPI_INTEGER,i, &
2828 tag,HYDRO_COMM_WORLD,mpp_status,ierr)
2830 if(recv(k) .eq. flag) vin(k) = flag
2831 if(vin(k) .ne. flag) then
2832 if(vin(k) .gt. 0 .and. recv(k) .gt. 0) then
2835 if(recv(k) .gt. 0) vin(k) = recv(k)
2842 call mpi_send(vin,nsize,MPI_INTEGER,IO_id, &
2843 tag,HYDRO_COMM_WORLD,ierr)
2845 call mpp_land_bcast_int1d(vin)
2847 end subroutine match1dLake
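!
! Usage sketch (illustrative only; "lake_id" and the flag value are
! hypothetical): merge the per-process lake index vector so every process
! ends up with the same global assignment, with entries resolved according
! to the flag convention described above.
!    call match1dLake(lake_id, nsize, -999)
!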
2849 subroutine mpp_land_abort()
2852 CALL MPI_ABORT(HYDRO_COMM_WORLD,1,IERR)
2853 end subroutine mpp_land_abort ! mpp_land_abort
2855 subroutine mpp_land_sync()
2858 call MPI_barrier( HYDRO_COMM_WORLD ,ierr)
2859 if(ierr .ne. 0) call mpp_land_abort()
2861 end subroutine mpp_land_sync ! mpp_land_sync
2863 subroutine mpp_comm_scalar_real(scalar, fromImage, toImage)
2865 real, intent(inout) :: scalar
2866 integer, intent(in) :: fromImage, toImage
2869 if(my_id .eq. fromImage) &
2870 call mpi_send(scalar, 1, MPI_REAL, &
2871 toImage, tag, HYDRO_COMM_WORLD, ierr)
2872 if(my_id .eq. toImage) &
2873 call mpi_recv(scalar, 1, MPI_REAL, &
2874 fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr)
2875 end subroutine mpp_comm_scalar_real
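!
! Usage sketch (illustrative only; "outflow" is a hypothetical caller
! variable): copy a scalar from one process to another; processes other
! than fromImage and toImage simply fall through.
!    call mpp_comm_scalar_real(outflow, fromImage=0, toImage=numprocs-1)
!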
2877 subroutine mpp_comm_scalar_char(scalar, fromImage, toImage)
2879 character(len=*), intent(inout) :: scalar
2880 integer, intent(in) :: fromImage, toImage
2881 integer:: ierr, tag, length
2884 if(my_id .eq. fromImage) &
2885 call mpi_send(scalar, length, MPI_CHARACTER, &
2886 toImage, tag, HYDRO_COMM_WORLD, ierr)
2887 if(my_id .eq. toImage) &
2888 call mpi_recv(scalar, length, MPI_CHARACTER, &
2889 fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr)
2890 end subroutine mpp_comm_scalar_char
2893 subroutine mpp_comm_1d_real(vector, fromImage, toImage)
2895 real, dimension(:), intent(inout) :: vector
2896 integer, intent(in) :: fromImage, toImage
2898 integer:: my_id, numprocs
2900 call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr)
2901 call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr)
2902 if(numprocs > 1) then
2903 if(my_id .eq. fromImage) &
2904 call mpi_send(vector, size(vector), MPI_REAL, &
2905 toImage, tag, HYDRO_COMM_WORLD, ierr)
2906 if(my_id .eq. toImage) &
2907 call mpi_recv(vector, size(vector), MPI_REAL, &
2908 fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr)
2910 end subroutine mpp_comm_1d_real
2913 subroutine mpp_comm_1d_char(vector, fromImage, toImage)
2915 character(len=*), dimension(:), intent(inout) :: vector
2916 integer, intent(in) :: fromImage, toImage
2917 integer:: ierr, tag, totalLength
2918 integer:: my_id,numprocs
2920 call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr)
2921 call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr)
2922 totalLength=len(vector(1))*size(vector,1)
2923 if(numprocs > 1) then
2924 if(my_id .eq. fromImage) &
2925 call mpi_send(vector, totalLength, MPI_CHARACTER, &
2926 toImage, tag, HYDRO_COMM_WORLD, ierr)
2927 if(my_id .eq. toImage) &
2928 call mpi_recv(vector, totalLength, MPI_CHARACTER, &
2929 fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr)
2931 end subroutine mpp_comm_1d_char
2934 END MODULE MODULE_MPP_LAND