!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! MODULE PARALLEL_MODULE
!
! This module provides routines for parallelizing work over a rectangular mesh
! of MPI processes: setting up the process mesh, computing the patch of the
! domain that each processor works on, gathering and scattering whole fields,
! exchanging halo regions between neighboring patches, and broadcasting values.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
module parallel_module

   include 'mpif.h'   ! MPI interface: MPI_COMM_WORLD, MPI_STATUS_SIZE, MPI_INTEGER, etc.

   integer, parameter :: IO_NODE = 0

   integer, parameter :: HALO_WIDTH = 3

   integer, pointer, dimension(:,:) :: processors, &
                                       proc_minx, proc_maxx, &
                                       proc_miny, proc_maxy

   integer :: nprocs, my_proc_id, &
              nproc_x, nproc_y, my_x, my_y, &
              my_minx, my_miny, my_maxx, my_maxy, &
              comm, mpi_ierr

   contains
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_start
   !
   ! Purpose: For MPI, set up a communicator for a rectangular mesh and
   !   determine how many processors there will be in the x and y directions.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_start()

      integer :: mpi_rank, mpi_size
      integer :: m, n, mini
      integer, dimension(2) :: dims, coords
      integer :: rectangle, myleft, myright, mytop, mybottom
      logical, dimension(2) :: periods

      ! Find out our rank and the total number of processors
      call MPI_Init(mpi_ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, mpi_rank, mpi_ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, mpi_size, mpi_ierr)

      comm = MPI_COMM_WORLD
      nprocs = mpi_size
      my_proc_id = mpi_rank

      ! Code from RSL to get number of procs in m and n directions
      mini = 2*nprocs
      do m = 1, nprocs
         if ( mod( nprocs, m ) == 0 ) then
            n = nprocs / m
            if ( abs(m-n) < mini ) then
               mini = abs(m-n)
               nproc_x = m
               nproc_y = n
            end if
         end if
      end do

      ! Calculate which patch the current processor will work on
      my_x = mod(mpi_rank,nproc_x)
      my_y = mpi_rank / nproc_x

   end subroutine parallel_start
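
   ! Worked example: with nprocs = 4, the factorization loop above selects the
   ! most nearly square decomposition, nproc_x = 2 and nproc_y = 2, and MPI
   ! ranks 0, 1, 2, 3 are assigned mesh coordinates (my_x,my_y) = (0,0), (1,0),
   ! (0,1), (1,1), respectively.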
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_get_tile_dims
   !
   ! Purpose: To compute the starting and ending indices of the patch that the
   !   calling processor is to work on. When there are multiple processors,
   !   appropriate data structures describing the range of indices being
   !   worked on by other processors are also allocated and filled
   !   (processors, proc_minx, proc_maxx, proc_miny, proc_maxy).
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_get_tile_dims(idim, jdim)

      integer, intent(in) :: idim, jdim

      integer :: i, j, ix, iy, px, py
      integer, dimension(2) :: buffer
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (nprocs > 1) then

         ! Determine starting and ending grid points in the x and y directions that we will work on.
         ! For now, copy code from RSL_LITE's module_dm.F until the build mechanism to link
         ! WRF and WPS code is worked out more. Eventually, it would probably be best to
         ! use the module_dm code without copying it.
         my_minx = -1
         j = 1
         do i=1,idim
            call task_for_point(i, j, 1, idim, 1, jdim, nproc_x, nproc_y, px, py)
            if ( px == my_x ) then
               my_maxx = i
               if ( my_minx == -1 ) my_minx = i
            end if
         end do

         my_miny = -1
         i = 1
         do j=1,jdim
            call task_for_point(i, j, 1, idim, 1, jdim, nproc_x, nproc_y, px, py)
            if ( py == my_y ) then
               my_maxy = j
               if ( my_miny == -1 ) my_miny = j
            end if
         end do

         ! Create space to hold information about which other processors are
         ! working on which parts of the domain
         allocate(processors(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_minx(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_miny(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_maxx(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_maxy(0:nproc_x-1, 0:nproc_y-1))

         ! Exchange information with other processors
         if (my_proc_id == IO_NODE) then
            processors(my_x, my_y) = my_proc_id
            do i=1,nprocs-1
               call MPI_Recv(buffer, 2, MPI_INTEGER, i, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
               processors(buffer(1), buffer(2)) = mpi_stat(MPI_SOURCE)
            end do
         else
            buffer(1) = my_x
            buffer(2) = my_y
            call MPI_Send(buffer, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         end if

         do iy=0,nproc_y-1
            do ix=0,nproc_x-1
               call parallel_bcast_int(processors(ix,iy), IO_NODE)
            end do
         end do

         proc_minx(my_x, my_y) = my_minx
         proc_maxx(my_x, my_y) = my_maxx
         proc_miny(my_x, my_y) = my_miny
         proc_maxy(my_x, my_y) = my_maxy

         do iy=0,nproc_y-1
            do ix=0,nproc_x-1
               call parallel_bcast_int(proc_minx(ix,iy), processors(ix,iy))
               call parallel_bcast_int(proc_maxx(ix,iy), processors(ix,iy))
               call parallel_bcast_int(proc_miny(ix,iy), processors(ix,iy))
               call parallel_bcast_int(proc_maxy(ix,iy), processors(ix,iy))
            end do
         end do

      else

         ! Serial case: the single processor works on the full domain
         allocate(processors(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_minx(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_miny(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_maxx(0:nproc_x-1, 0:nproc_y-1))
         allocate(proc_maxy(0:nproc_x-1, 0:nproc_y-1))

         processors(0,0) = IO_NODE
         proc_minx(0,0) = 1
         proc_miny(0,0) = 1
         proc_maxx(0,0) = idim
         proc_maxy(0,0) = jdim
         my_minx = 1
         my_miny = 1
         my_maxx = idim
         my_maxy = jdim

      end if

   end subroutine parallel_get_tile_dims
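
   ! Worked example: for a 10 x 10 domain on 4 processors (nproc_x = nproc_y = 2),
   ! task_for_point() assigns global columns 1-5 to my_x = 0 and columns 6-10 to
   ! my_x = 1 (and likewise for rows), so the processor at (my_x,my_y) = (0,0)
   ! ends up with (my_minx, my_maxx, my_miny, my_maxy) = (1, 5, 1, 5).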
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Copied from RSL_LITE's task_for_point.c until a good way can be found to
   ! get the build mechanism to use the original code in RSL_LITE.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine task_for_point(i_p, j_p, ids_p, ide_p, jds_p, jde_p, npx, npy, px, py)

      integer, intent(in) :: i_p, j_p, ids_p, ide_p, jds_p, jde_p, npx, npy
      integer, intent(out) :: px, py

      integer :: a, b, rem, idim, jdim, i, j, ids, jds, ide, jde

      ! Convert to 0-based indices
      i = i_p - 1
      j = j_p - 1
      ids = ids_p - 1
      ide = ide_p - 1
      jds = jds_p - 1
      jde = jde_p - 1
      idim = ide - ids + 1
      jdim = jde - jds + 1

      ! Assign the i-index to a task column
      i = max(ids, min(ide, i))
      rem = mod(idim, npx)
      a = ( rem / 2 ) * ( (idim / npx) + 1 )
      b = a + ( npx - rem ) * ( idim / npx )
      if ( i-ids < a ) then
         px = (i-ids) / ( (idim / npx) + 1 )
      else if ( i-ids < b ) then
         px = ( a / ( (idim / npx) + 1 ) ) + (i-a-ids) / ( ( b - a ) / ( npx - rem ) )
      else
         px = ( a / ( (idim / npx) + 1 ) ) + (b-a-ids) / ( ( b - a ) / ( npx - rem ) ) + &
              (i-b-ids) / ( ( idim / npx ) + 1 )
      end if

      ! Assign the j-index to a task row
      j = max(jds, min(jde, j))
      rem = mod(jdim, npy)
      a = ( rem / 2 ) * ( (jdim / npy) + 1 )
      b = a + ( npy - rem ) * ( jdim / npy )
      if ( j-jds < a ) then
         py = (j-jds) / ( (jdim / npy) + 1 )
      else if ( j-jds < b ) then
         py = ( a / ( (jdim / npy) + 1 ) ) + (j-a-jds) / ( ( b - a ) / ( npy - rem ) )
      else
         py = ( a / ( (jdim / npy) + 1 ) ) + (b-a-jds) / ( ( b - a ) / ( npy - rem ) ) + &
              (j-b-jds) / ( ( jdim / npy ) + 1 )
      end if

   end subroutine task_for_point
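
   ! Worked example: for a 10-point dimension decomposed over npx = 3 tasks
   ! (ids_p = 1, ide_p = 10), the formulas above give rem = 1, a = 0, b = 6,
   ! so global points 1-3 map to px = 0, points 4-6 to px = 1, and points
   ! 7-10 to px = 2; the leftover point from the uneven division is assigned
   ! to the last task.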
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: gather_whole_field_i
   !
   ! Purpose: Gather the distributed patches of an integer field from all
   !   processors into a single domain-sized array on the I/O node.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine gather_whole_field_i(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                   ps1, pe1, ps2, pe2, ps3, pe3, &
                                   domain_array, ds1, de1, ds2, de2, ds3, de3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(in) :: patch_array
      integer, dimension(ds1:de1,ds2:de2,ds3:de3), intent(inout) :: domain_array

      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Recv(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  end do
               else
                  domain_array(ps1:pe1,ps2:pe2,ps3:pe3) = patch_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Send(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if

   end subroutine gather_whole_field_i
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: gather_whole_field_r
   !
   ! Purpose: Gather the distributed patches of a real field from all
   !   processors into a single domain-sized array on the I/O node.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine gather_whole_field_r(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                   ps1, pe1, ps2, pe2, ps3, pe3, &
                                   domain_array, ds1, de1, ds2, de2, ds3, de3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(in) :: patch_array
      real, dimension(ds1:de1,ds2:de2,ds3:de3), intent(inout) :: domain_array

      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Recv(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_REAL, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  end do
               else
                  domain_array(ps1:pe1,ps2:pe2,ps3:pe3) = patch_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Send(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), MPI_REAL, 0, my_proc_id, comm, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if

   end subroutine gather_whole_field_r
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: scatter_whole_field_i
   !
   ! Purpose: Scatter a domain-sized integer array on the I/O node to the
   !   distributed patches owned by the individual processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine scatter_whole_field_i(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                    ps1, pe1, ps2, pe2, ps3, pe3, &
                                    domain_array, ds1, de1, ds2, de2, ds3, de3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array
      integer, dimension(ds1:de1,ds2:de2,ds3:de3), intent(in) :: domain_array

      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Send(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_INTEGER, processors(i,j), my_proc_id, comm, mpi_ierr)
                  end do
               else
                  patch_array(ps1:pe1,ps2:pe2,ps3:pe3) = domain_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Recv(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), &
                          MPI_INTEGER, 0, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if

   end subroutine scatter_whole_field_i
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: scatter_whole_field_r
   !
   ! Purpose: Scatter a domain-sized real array on the I/O node to the
   !   distributed patches owned by the individual processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine scatter_whole_field_r(patch_array, ms1, me1, ms2, me2, ms3, me3, &
                                    ps1, pe1, ps2, pe2, ps3, pe3, &
                                    domain_array, ds1, de1, ds2, de2, ds3, de3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3, &
                             ds1, de1, ds2, de2, ds3, de3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array
      real, dimension(ds1:de1,ds2:de2,ds3:de3), intent(in) :: domain_array

      integer :: i, ii, j, jj, kk
      integer, dimension(2) :: idims, jdims
      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      if (my_proc_id == IO_NODE) then

         do j=0,nproc_y-1
            do i=0,nproc_x-1
               if (processors(i,j) /= IO_NODE) then
                  call MPI_Recv(jdims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  call MPI_Recv(idims, 2, MPI_INTEGER, processors(i,j), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
                  do kk=ds3,de3
                     ! BUG: Check on mpi_stat and mpi_ierr
                     call MPI_Send(domain_array(idims(1):idims(2),jdims(1):jdims(2),kk), &
                                   (idims(2)-idims(1)+1)*(jdims(2)-jdims(1)+1), &
                                   MPI_REAL, processors(i,j), my_proc_id, comm, mpi_ierr)
                  end do
               else
                  patch_array(ps1:pe1,ps2:pe2,ps3:pe3) = domain_array(ps1:pe1,ps2:pe2,ps3:pe3)
               end if
            end do
         end do

      else

         jdims(1) = ps2
         jdims(2) = pe2
         call MPI_Send(jdims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         idims(1) = ps1
         idims(2) = pe1
         call MPI_Send(idims, 2, MPI_INTEGER, 0, my_proc_id, comm, mpi_ierr)
         do kk=ps3,pe3
            call MPI_Recv(patch_array(ps1:pe1,ps2:pe2,kk), (pe1-ps1+1)*(pe2-ps2+1), &
                          MPI_REAL, 0, MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)
            ! BUG: Check on mpi_ierr
         end do

      end if

   end subroutine scatter_whole_field_r
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: exchange_halo_r
   !
   ! Purpose: Exchange real-valued halo (ghost cell) rows and columns of the
   !   local patch with the neighboring processors in the process mesh.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine exchange_halo_r(patch_array, &
                              ms1, me1, ms2, me2, ms3, me3, &
                              ps1, pe1, ps2, pe2, ps3, pe3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3
      real, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array

      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      ! Get left edge of halo
      if (my_x /= (nproc_x - 1)) then
         call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                       processors(my_x+1,my_y), my_proc_id, comm, mpi_ierr)
         call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                       processors(my_x-1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get right edge of halo
         call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                       processors(my_x-1,my_y), my_proc_id, comm, mpi_ierr)
      if (my_x /= (nproc_x - 1)) then
         call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                       processors(my_x+1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get bottom edge of halo
      if (my_y /= (nproc_y - 1)) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                          processors(my_x,my_y+1), my_proc_id, comm, mpi_ierr)
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                          processors(my_x,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get top edge of halo
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                          processors(my_x,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1)) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_REAL, &
                          processors(my_x,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get lower-right corner of halo
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x-1,my_y+1), my_proc_id, comm, mpi_ierr)
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x+1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get upper-left corner of halo
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x+1,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x-1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get upper-right corner of halo
      if (my_y /= 0 .and. my_x /= 0) then
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x-1,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x+1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get lower-left corner of halo
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x+1,my_y+1), my_proc_id, comm, mpi_ierr)
      if (my_y /= 0 .and. my_x /= 0) then
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_REAL, &
                          processors(my_x-1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

   end subroutine exchange_halo_r
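
   ! Usage sketch (illustrative only; "field" and "nz" are not part of this
   ! module): the send/recv index ranges above assume the caller pads its
   ! patch by HALO_WIDTH cells on each side of the horizontal dimensions.
   !
   !    real, allocatable, dimension(:,:,:) :: field
   !    allocate(field(my_minx-HALO_WIDTH:my_maxx+HALO_WIDTH, &
   !                   my_miny-HALO_WIDTH:my_maxy+HALO_WIDTH, 1:nz))
   !    call exchange_halo_r(field, &
   !                         my_minx-HALO_WIDTH, my_maxx+HALO_WIDTH, &
   !                         my_miny-HALO_WIDTH, my_maxy+HALO_WIDTH, 1, nz, &
   !                         my_minx, my_maxx, my_miny, my_maxy, 1, nz)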
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: exchange_halo_i
   !
   ! Purpose: Exchange integer-valued halo (ghost cell) rows and columns of the
   !   local patch with the neighboring processors in the process mesh.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine exchange_halo_i(patch_array, &
                              ms1, me1, ms2, me2, ms3, me3, &
                              ps1, pe1, ps2, pe2, ps3, pe3)

      integer, intent(in) :: ps1, pe1, ps2, pe2, ps3, pe3, &
                             ms1, me1, ms2, me2, ms3, me3
      integer, dimension(ms1:me1,ms2:me2,ms3:me3), intent(inout) :: patch_array

      integer, dimension(MPI_STATUS_SIZE) :: mpi_stat

      ! Get left edge of halo
      if (my_x /= (nproc_x - 1)) then
         call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                       processors(my_x+1,my_y), my_proc_id, comm, mpi_ierr)
         call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                       processors(my_x-1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get right edge of halo
         call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                       processors(my_x-1,my_y), my_proc_id, comm, mpi_ierr)
      if (my_x /= (nproc_x - 1)) then
         call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                       processors(my_x+1,my_y), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get bottom edge of halo
      if (my_y /= (nproc_y - 1)) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                          processors(my_x,my_y+1), my_proc_id, comm, mpi_ierr)
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                          processors(my_x,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get top edge of halo
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                          processors(my_x,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1)) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(ms1:me1,jj,kk), (me1-ms1+1), MPI_INTEGER, &
                          processors(my_x,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get lower-right corner of halo
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x-1,my_y+1), my_proc_id, comm, mpi_ierr)
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x+1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get upper-left corner of halo
      if (my_y /= 0 .and. my_x /= (nproc_x - 1)) then
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x+1,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1) .and. my_x /= 0) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x-1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get upper-right corner of halo
      if (my_y /= 0 .and. my_x /= 0) then
         do jj=ps2,ps2+HALO_WIDTH-1
            call MPI_Send(patch_array(ps1:ps1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x-1,my_y-1), my_proc_id, comm, mpi_ierr)
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do jj=me2-HALO_WIDTH+1,me2
            call MPI_Recv(patch_array(me1-HALO_WIDTH+1:me1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x+1,my_y+1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

      ! Get lower-left corner of halo
      if (my_y /= (nproc_y - 1) .and. my_x /= (nproc_x - 1)) then
         do jj=pe2-HALO_WIDTH+1,pe2
            call MPI_Send(patch_array(pe1-HALO_WIDTH+1:pe1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x+1,my_y+1), my_proc_id, comm, mpi_ierr)
      if (my_y /= 0 .and. my_x /= 0) then
         do jj=ms2,ms2+HALO_WIDTH-1
            call MPI_Recv(patch_array(ms1:ms1+HALO_WIDTH-1,jj,kk), HALO_WIDTH, MPI_INTEGER, &
                          processors(my_x-1,my_y-1), MPI_ANY_TAG, comm, mpi_stat, mpi_ierr)

   end subroutine exchange_halo_i
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_bcast_logical
   !
   ! Purpose: Broadcast a logical value from the I/O node to all other
   !   processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_logical(lval)

      logical, intent(inout) :: lval

      call MPI_Bcast(lval, 1, MPI_LOGICAL, IO_NODE, comm, mpi_ierr)

   end subroutine parallel_bcast_logical
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_bcast_int
   !
   ! Purpose: Broadcast an integer value from the specified processor (the
   !   I/O node by default) to all other processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_int(ival, from_whom)

      integer, intent(inout) :: ival
      integer, intent(in), optional :: from_whom

      if (present(from_whom)) then
         call MPI_Bcast(ival, 1, MPI_INTEGER, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(ival, 1, MPI_INTEGER, IO_NODE, comm, mpi_ierr)
      end if

   end subroutine parallel_bcast_int
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_bcast_real
   !
   ! Purpose: Broadcast a real value from the specified processor (the
   !   I/O node by default) to all other processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_real(rval, from_whom)

      real, intent(inout) :: rval
      integer, intent(in), optional :: from_whom

      if (present(from_whom)) then
         call MPI_Bcast(rval, 1, MPI_REAL, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(rval, 1, MPI_REAL, IO_NODE, comm, mpi_ierr)
      end if

   end subroutine parallel_bcast_real
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_bcast_char
   !
   ! Purpose: Broadcast a character string of length n from the specified
   !   processor (the I/O node by default) to all other processors.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_bcast_char(cval, n, from_whom)

      integer, intent(in) :: n
      character (len=n), intent(inout) :: cval
      integer, intent(in), optional :: from_whom

      if (present(from_whom)) then
         call MPI_Bcast(cval, n, MPI_CHARACTER, from_whom, comm, mpi_ierr)
      else
         call MPI_Bcast(cval, n, MPI_CHARACTER, IO_NODE, comm, mpi_ierr)
      end if

   end subroutine parallel_bcast_char
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_finish
   !
   ! Purpose: Deallocate the decomposition data structures and, for MPI,
   !   finalize the MPI library.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_finish()

      call MPI_Finalize(mpi_ierr)

      if (associated(processors)) deallocate(processors)
      if (associated(proc_minx)) deallocate(proc_minx)
      if (associated(proc_maxx)) deallocate(proc_maxx)
      if (associated(proc_miny)) deallocate(proc_miny)
      if (associated(proc_maxy)) deallocate(proc_maxy)

   end subroutine parallel_finish
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   ! Name: parallel_abort
   !
   ! Purpose: Abort all MPI processes, terminating the program immediately.
   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
   subroutine parallel_abort()

      integer :: mpi_ierr, mpi_errcode

      call MPI_Abort(MPI_COMM_WORLD, mpi_errcode, mpi_ierr)

   end subroutine parallel_abort

end module parallel_module
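
! The program below is an illustrative sketch, not part of the original module,
! showing the intended calling sequence. The array name "field" and the sizes
! idim, jdim, and nz are invented for the example; it assumes the module's
! decomposition variables (my_minx, my_maxx, ...) are publicly accessible.
program parallel_module_demo

   use parallel_module

   implicit none

   integer, parameter :: idim = 100, jdim = 80, nz = 1
   real, allocatable, dimension(:,:,:) :: field

   ! Initialize MPI and factor the tasks into an nproc_x by nproc_y mesh
   call parallel_start()

   ! Compute this processor's patch bounds (my_minx:my_maxx, my_miny:my_maxy)
   call parallel_get_tile_dims(idim, jdim)

   ! Allocate the local patch, padded by HALO_WIDTH cells on each side
   allocate(field(my_minx-HALO_WIDTH:my_maxx+HALO_WIDTH, &
                  my_miny-HALO_WIDTH:my_maxy+HALO_WIDTH, 1:nz))
   field = real(my_proc_id)

   ! Fill this patch's halo region with values from neighboring patches
   call exchange_halo_r(field, &
                        my_minx-HALO_WIDTH, my_maxx+HALO_WIDTH, &
                        my_miny-HALO_WIDTH, my_maxy+HALO_WIDTH, 1, nz, &
                        my_minx, my_maxx, my_miny, my_maxy, 1, nz)

   deallocate(field)

   ! Free the decomposition structures and shut down MPI
   call parallel_finish()

end program parallel_module_demo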