program da_vp_split
!----------------------------------------------------------------------
! Purpose: Scatter the global high-resolution control variables to the
!          individual PEs
!
! Input  : vp_output.global_hires -- high-resolution global control variables
!
! Output : vp_input.XXXX -- high-resolution local control variables,
!          one file per MPI task
!
! To preserve the domain size, the high-resolution grid must have
! ( n - 1 ) * ratio + 1 points in each horizontal direction,
! where n is the number of grid points in x or y on the coarse grid
! and ratio is the refinement ratio between the two resolutions.
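! For example, a 121-point coarse grid with ratio = 3 requires
! ( 121 - 1 ) * 3 + 1 = 361 high-resolution points in that direction.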
!
! liuz@ucar.edu, 2016-05, NCAR/MMM
!----------------------------------------------------------------------
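!
! Illustrative invocation (the executable and flag names here are
! assumptions, not defined by this file; only arguments 2 and 4 are
! parsed as values, see "read program arguments" below):
!   mpirun -np 4 ./da_vp_split.exe -cloud_cv_options 3 -use_cv_w 1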

implicit none

include 'mpif.h'

integer :: i, j, k, n, status

INTEGER :: ntasks_x, ntasks_y, mytask, mytask_x, mytask_y
INTEGER :: new_local_comm, local_communicator
INTEGER, DIMENSION(2) :: dims, coords
LOGICAL, DIMENSION(2) :: isperiodic
INTEGER :: ids, ide, jds, jde, kds, kde, &
           ips, ipe, jps, jpe, kps, kpe
INTEGER :: minx = 1, miny = 1 ! minimum patch size passed to task_for_point
                              ! (assumed to be 1 here)
integer :: ratio = 3          ! refinement ratio between the two resolutions

integer :: io_status

character (len = 255) :: vp_hires
character (len = 255) :: arg = ""
integer, parameter :: vp_unit = 8

integer :: ix, jy, kz

real, dimension(:,:,:), allocatable :: v1, v2, v3, v4, v5
real, dimension(:,:,:), allocatable :: v6, v7, v8, v9, v10, v11
real, dimension(:,:,:), allocatable :: v1l, v2l, v3l, v4l, v5l
real, dimension(:,:,:), allocatable :: v6l, v7l, v8l, v9l, v10l, v11l

integer :: size, ierror
integer :: cloud_cv_options ! 2 or 3 with cloud cv variables
integer :: use_cv_w         ! = 1 for w control variable

LOGICAL :: file_exists

!------------------------------
! read program arguments
!------------------------------
! arguments are given as flag/value pairs; the flag names (arguments 1
! and 3) are read and discarded, only the values (arguments 2 and 4)
! are parsed
call getarg(1, arg)
call getarg(2, arg)
read(arg, '(i3)') cloud_cv_options

call getarg(3, arg)
call getarg(4, arg)
read(arg, '(i3)') use_cv_w

write (*, *) 'cloud_cv_options = ', cloud_cv_options, &
             'use_cv_w = ', use_cv_w

!---------------------------------------------------------------------
! MPI initialization
!---------------------------------------------------------------------
call MPI_INIT(ierror)
call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierror)
call MPI_COMM_RANK(MPI_COMM_WORLD, mytask, ierror)
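
! factor the task count into an ntasks_x * ntasks_y mesh that is as close
! to square as possible (see MPASPECT below); e.g. 6 tasks give a 2 x 3 mesh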
call MPASPECT( size, ntasks_x, ntasks_y, 1, 1 )
if ( mytask == 0 ) WRITE( * , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y

new_local_comm = MPI_COMM_WORLD
dims(1) = ntasks_y ! rows
dims(2) = ntasks_x ! columns
isperiodic(1) = .false.
isperiodic(2) = .false.
CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierror )
CALL mpi_comm_rank( local_communicator, mytask, ierror )
CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierror )
mytask_x = coords(2) ! col task (x)
mytask_y = coords(1) ! row task (y)
!write (*,*) "The coords of task ",mytask, " is ",mytask_x,mytask_y

io_status = 0

vp_hires = 'vp_output.global_hires'
inquire(FILE=trim(vp_hires), EXIST=file_exists)

if ( .not. file_exists ) then
   write(*,*) "Error: "//trim(vp_hires)//" does not exist"
   call exit(-1)
endif

open(unit=vp_unit,file=trim(vp_hires),iostat=io_status,form='UNFORMATTED',status='OLD')
if (io_status /= 0) then
   write(*,*) "Error ",io_status," opening vp file "//trim(vp_hires)
   call exit(-1)
end if
if ( mytask == 0 ) write(*,*) 'Reading vp from : '//trim(vp_hires)
read(vp_unit) ide, jde, kde ! domain dimensions (unstaggered)
ide = ide + 1 ! WRF parallel decomposition is based on the staggered grid
jde = jde + 1
kde = kde + 1
if ( mytask == 0 ) write(*,*) 'ide, jde, kde = ', ide, jde, kde
ids = 1
jds = 1
kds = 1

!---------------------------------------------------------------------
! Calculate the domain decomposition
!---------------------------------------------------------------------
CALL compute_memory_dims_rsl_lite ( 0 , &
                                    ids, ide, jds, jde, kds, kde, &
                                    ips, ipe, jps, jpe, kps, kpe )
! convert to A-grid and middle levels on which control variables sit
if ( ipe == ide ) ipe = ipe - 1
if ( jpe == jde ) jpe = jpe - 1
if ( kpe == kde ) kpe = kpe - 1
!WRITE(*,*)'*************************************'
!WRITE(90,*)'local ',ips,ipe,jps,jpe,kps,kpe
WRITE(*,*)'local ',ips,ipe,jps,jpe,kps,kpe
!WRITE(*,*)'*************************************'

!---------------------------------------------------------------------
! allocate global vp variables (unstaggered)
!---------------------------------------------------------------------
allocate ( v1(ids:ide-1,jds:jde-1,kds:kde-1) )
allocate ( v2(ids:ide-1,jds:jde-1,kds:kde-1) )
allocate ( v3(ids:ide-1,jds:jde-1,kds:kde-1) )
allocate ( v4(ids:ide-1,jds:jde-1,kds:kde-1) )
allocate ( v5(ids:ide-1,jds:jde-1,kds:kde-1) )

if ( cloud_cv_options >= 2 ) then
   allocate ( v6(ids:ide-1,jds:jde-1,kds:kde-1) )
   allocate ( v7(ids:ide-1,jds:jde-1,kds:kde-1) )
   allocate ( v8(ids:ide-1,jds:jde-1,kds:kde-1) )
   allocate ( v9(ids:ide-1,jds:jde-1,kds:kde-1) )
   allocate ( v10(ids:ide-1,jds:jde-1,kds:kde-1) )
end if

if ( use_cv_w == 1 ) allocate ( v11(ids:ide-1,jds:jde-1,kds:kde-1) )
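
! global file layout: record 1 (read above) holds the unstaggered domain
! dimensions, record 2 the five basic control variables, followed by
! optional records for the cloud control variables and w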
read(vp_unit) v1, v2, v3, v4, v5
if ( cloud_cv_options >= 2 ) read(vp_unit) v6, v7, v8, v9, v10
if ( use_cv_w == 1 ) read(vp_unit) v11
close(vp_unit)

call MPI_BARRIER(MPI_COMM_WORLD,ierror)
if ( mytask == 0 ) write(*,*) 'Reading vp from : '//trim(vp_hires)//' is completed'

!---------------------------------------------------------------------
! allocate local vp variables (unstaggered)
!---------------------------------------------------------------------
ix = ipe-ips+1
jy = jpe-jps+1
kz = kpe-kps+1

allocate ( v1l(1:ix,1:jy,1:kz) )
allocate ( v2l(1:ix,1:jy,1:kz) )
allocate ( v3l(1:ix,1:jy,1:kz) )
allocate ( v4l(1:ix,1:jy,1:kz) )
allocate ( v5l(1:ix,1:jy,1:kz) )

if ( cloud_cv_options >= 2 ) then
   allocate ( v6l(1:ix,1:jy,1:kz) )
   allocate ( v7l(1:ix,1:jy,1:kz) )
   allocate ( v8l(1:ix,1:jy,1:kz) )
   allocate ( v9l(1:ix,1:jy,1:kz) )
   allocate ( v10l(1:ix,1:jy,1:kz) )
end if

if ( use_cv_w == 1 ) allocate ( v11l(1:ix,1:jy,1:kz) )

!---------------------------------------------------------------------
! Scatter vp to PEs
!---------------------------------------------------------------------
! every task has read the full global arrays, so "scattering" is simply
! each task copying out its own patch
v1l(1:ix,1:jy,1:kz) = v1(ips:ipe,jps:jpe,kps:kpe)
v2l(1:ix,1:jy,1:kz) = v2(ips:ipe,jps:jpe,kps:kpe)
v3l(1:ix,1:jy,1:kz) = v3(ips:ipe,jps:jpe,kps:kpe)
v4l(1:ix,1:jy,1:kz) = v4(ips:ipe,jps:jpe,kps:kpe)
v5l(1:ix,1:jy,1:kz) = v5(ips:ipe,jps:jpe,kps:kpe)

if ( cloud_cv_options >= 2 ) then
   v6l(1:ix,1:jy,1:kz) = v6(ips:ipe,jps:jpe,kps:kpe)
   v7l(1:ix,1:jy,1:kz) = v7(ips:ipe,jps:jpe,kps:kpe)
   v8l(1:ix,1:jy,1:kz) = v8(ips:ipe,jps:jpe,kps:kpe)
   v9l(1:ix,1:jy,1:kz) = v9(ips:ipe,jps:jpe,kps:kpe)
   v10l(1:ix,1:jy,1:kz) = v10(ips:ipe,jps:jpe,kps:kpe)
end if

if ( use_cv_w == 1 ) v11l(1:ix,1:jy,1:kz) = v11(ips:ipe,jps:jpe,kps:kpe)

write (vp_hires,'(A,i4.4)') "vp_input.", mytask

open(unit=vp_unit,file=trim(vp_hires),iostat=io_status,form='UNFORMATTED',status='UNKNOWN')
if (io_status /= 0) then
   write(*,*) "Error ",io_status," opening vp file "//trim(vp_hires)
   call exit(-1)
end if
write(*,*) 'Writing vp on hires to : '//trim(vp_hires)
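! per-task file layout: record 1 holds this task's patch index bounds;
! the remaining records mirror the global file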
write(vp_unit) ips, ipe, jps, jpe, kps, kpe
write(vp_unit) v1l, v2l, v3l, v4l, v5l
if ( cloud_cv_options >= 2 ) write(vp_unit) v6l, v7l, v8l, v9l, v10l
if ( use_cv_w == 1 ) write(vp_unit) v11l
!write(*,*) 'Sample of cvt :',mytask, maxval(cvt), minval(cvt)
close(vp_unit)

!---------------------------------------------------------------------
! The end
!---------------------------------------------------------------------
!if ( mytask == 0 ) then
deallocate (v1)
deallocate (v2)
deallocate (v3)
deallocate (v4)
deallocate (v5)
deallocate (v1l)
deallocate (v2l)
deallocate (v3l)
deallocate (v4l)
deallocate (v5l)

if ( cloud_cv_options >= 2 ) then
   deallocate (v6)
   deallocate (v7)
   deallocate (v8)
   deallocate (v9)
   deallocate (v10)
   deallocate (v6l)
   deallocate (v7l)
   deallocate (v8l)
   deallocate (v9l)
   deallocate (v10l)
end if

if ( use_cv_w == 1 ) then
   deallocate (v11)
   deallocate (v11l)
end if
!endif

call MPI_BARRIER(MPI_COMM_WORLD,ierror)
if ( mytask == 0 ) write(*,*) "Distributing control variables completed successfully"
call MPI_FINALIZE(ierror)

contains
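
!---------------------------------------------------------------------
! MPASPECT: factor P tasks into an M x N processor mesh, minimizing
! |M - N| subject to M >= PROCMIN_M and N >= PROCMIN_N;
! e.g. P = 6 with minimums of 1 yields a 2 x 3 mesh
!---------------------------------------------------------------------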
SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
   IMPLICIT NONE
   INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N, ierror
   MINI = 2*P
   MINM = 1
   MINN = P
   DO M = 1, P
      IF ( MOD( P, M ) .EQ. 0 ) THEN
         N = P / M
         IF ( ABS(M-N) .LT. MINI &
              .AND. M .GE. PROCMIN_M &
              .AND. N .GE. PROCMIN_N &
            ) THEN
            MINI = ABS(M-N)
            MINM = M
            MINN = N
         ENDIF
      ENDIF
   ENDDO
   IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
      WRITE( * , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
      WRITE( * , * )' PROCMIN_M ', PROCMIN_M
      WRITE( * , * )' PROCMIN_N ', PROCMIN_N
      WRITE( * , * )' P         ', P
      WRITE( * , * )' MINM      ', MINM
      WRITE( * , * )' MINN      ', MINN
      call MPI_FINALIZE(ierror)
      stop
   ENDIF
   RETURN
END SUBROUTINE MPASPECT
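
!---------------------------------------------------------------------
! compute_memory_dims_rsl_lite: derive this task's patch bounds
! (ips:ipe, jps:jpe, kps:kpe) by sweeping one row and one column of the
! domain and recording which points task_for_point assigns to this task
!---------------------------------------------------------------------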
SUBROUTINE compute_memory_dims_rsl_lite ( &
               shw , &
               ids, ide, jds, jde, kds, kde, &
               ips, ipe, jps, jpe, kps, kpe )

   IMPLICIT NONE
   INTEGER, INTENT(IN)  :: shw
   INTEGER, INTENT(IN)  :: ids, ide, jds, jde, kds, kde
   INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe

   INTEGER Px, Py, P, i, j, k, ierr
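
   ! task_for_point is provided by WRF's RSL_LITE layer (it is not defined
   ! in this file); it returns the task coordinates (Px, Py) that own grid
   ! point (i, j) given the domain bounds and the processor mesh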
   ! xy decomposition

   ips = -1
   j = jds
   ierr = 0
   DO i = ids, ide
      CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
                            minx, miny, ierr )
      IF ( ierr .NE. 0 ) stop 'error code returned by task_for_point '
      IF ( Px .EQ. mytask_x ) THEN
         ipe = i
         IF ( ips .EQ. -1 ) ips = i
      ENDIF
   ENDDO
   ! handle setting the memory dimensions where there are no X elements assigned to this proc
   IF (ips .EQ. -1 ) THEN
      ipe = -1
      ips = 0
   ENDIF
   jps = -1
   i = ids
   ierr = 0
   DO j = jds, jde
      CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
                            minx, miny, ierr )
      IF ( ierr .NE. 0 ) stop 'error code returned by task_for_point '
      IF ( Py .EQ. mytask_y ) THEN
         jpe = j
         IF ( jps .EQ. -1 ) jps = j
      ENDIF
   ENDDO
   ! handle setting the memory dimensions where there are no Y elements assigned to this proc
   IF (jps .EQ. -1 ) THEN
      jpe = -1
      jps = 0
   ENDIF

   !begin: wig; 12-Mar-2008
   ! This appears redundant with the conditionals above, but we get cases with only
   ! one of the directions being set to "missing" when turning off extra processors.
   ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
   IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
      ipe = -1
      ips = 0
      jpe = -1
      jps = 0
   ENDIF
   !end: wig; 12-Mar-2008

   ! extend the patch dimensions out shw along edges of domain
   IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
      IF ( mytask_x .EQ. 0 ) THEN
         ips = ips - shw
      ENDIF
      IF ( mytask_x .EQ. ntasks_x-1 ) THEN
         ipe = ipe + shw
      ENDIF
      IF ( mytask_y .EQ. 0 ) THEN
         jps = jps - shw
      ENDIF
      IF ( mytask_y .EQ. ntasks_y-1 ) THEN
         jpe = jpe + shw
      ENDIF
   ENDIF !wig; 11-Mar-2008

   kps = 1
   kpe = kde-kds+1

END SUBROUTINE compute_memory_dims_rsl_lite

end program da_vp_split