1 subroutine da_cv_to_global(cv_size, cv_size_global, x, grid, mzs, nmzs, xg )
4 !-----------------------------------------------------------------------
5 ! Purpose: Gathers local cv-array x into domain cv-array xg(:).
6 ! Global cv-array xg will only be valid on the "monitor" task.
8 ! Must be called by all MPI tasks.
9 !-----------------------------------------------------------------------
13 integer, intent(in) :: cv_size ! Size of local cv-array
14 integer, intent(in) :: cv_size_global ! Size of domain cv-array
15 real, intent(in) :: x(1:cv_size) ! local cv-array
16 type(domain), intent(in) :: grid
17 integer, intent(in) :: nmzs ! number of mz
18 integer, intent(in) :: mzs(nmzs) ! mz for each variable
19 ! (to identify empty or 2D arrays)
20 real, intent(inout) :: xg(1:cv_size_global) ! global cv-array
! Work structures: the 1-D cv vector is unpacked into gridded vp fields
! (vv_x), gathered field-by-field onto the monitor task (vv_xg), and
! finally repacked into the globally-sized cv vector xg.
25 type (vp_type) :: vv_x ! Grdipt/EOF cv_array (local)
26 type (vp_type) :: vv_xg ! Grdipt/EOF cv_array (global)
27 type (xpose_type) :: xpg ! global dimensions
28 integer :: ids, ide, jds, jde, kds, &
29 ims, ime, jms, jme, kms, &
30 ips, ipe, jps, jpe, kps
34 if (trace_use) call da_trace_entry("da_cv_to_global")
37 ! Gather to mimic serial summation order.
40 ! k?e varies with variable v1 - v5
! NOTE(review): the next two statements are self-assignments of local
! variables that have not been initialized at this point, so ids..kds and
! ims..jme remain undefined. They look like they were intended to copy
! from the grid, i.e. ids = grid%xp%ids, ims = grid%xp%ims, etc., exactly
! as the third statement does for ips..kps (and as kms already does).
! Confirm against the project's other patch-to-global routines.
41 ids = ids; ide = ide; jds = jds; jde = jde; kds = kds
42 ims = ims; ime = ime; jms = jms; jme = jme; kms = grid%xp%kms
43 ips = grid%xp%ips; ipe = grid%xp%ipe; jps = grid%xp%jps; jpe = grid%xp%jpe; kps = grid%xp%kps
45 ! TOdo: encapsulate this crap!
! Local (memory-dimensioned) fields. The vertical extent mzs(k) may be
! 0 (variable absent) or 1 (2D variable); zero-extent allocation is legal.
47 allocate(vv_x%v1(ims:ime,jms:jme,mzs(1)))
48 allocate(vv_x%v2(ims:ime,jms:jme,mzs(2)))
49 allocate(vv_x%v3(ims:ime,jms:jme,mzs(3)))
50 allocate(vv_x%v4(ims:ime,jms:jme,mzs(4)))
51 allocate(vv_x%v5(ims:ime,jms:jme,mzs(5)))
53 allocate(vv_x%v6(ims:ime,jms:jme,mzs(6)))
54 allocate(vv_x%v7(ims:ime,jms:jme,mzs(7)))
55 allocate(vv_x%v8(ims:ime,jms:jme,mzs(8)))
56 allocate(vv_x%v9(ims:ime,jms:jme,mzs(9)))
57 allocate(vv_x%v10(ims:ime,jms:jme,mzs(10)))
! v11 is allocated only for these specific mz counts — presumably tied to
! particular cv_options configurations; verify against the callers.
59 if ( nmzs == 8 .or. nmzs == 13 ) then
60 allocate(vv_x%v11(ims:ime,jms:jme,mzs(nmzs-2)))
62 ! the 4th dimension of vv_x%alpha is the number of ensembles
! NOTE(review): the *_int bounds below are not declared in this routine —
! presumably module-level bounds of the intermediate (dual-resolution)
! grid; confirm where they are set.
63 allocate(vv_x%alpha(ims_int:ime_int,jms_int:jme_int,kms_int:kme_int,mzs(nmzs)))
! Unpack the 1-D cv vector x into the gridded fields of vv_x.
65 call da_cv_to_vv (cv_size, x, mzs, nmzs, vv_x )
! Domain-dimensioned gather targets (contents meaningful only on the
! monitor task, per the header comment above).
69 allocate(vv_xg%v1(ids:ide,jds:jde,mzs(1)))
70 allocate(vv_xg%v2(ids:ide,jds:jde,mzs(2)))
71 allocate(vv_xg%v3(ids:ide,jds:jde,mzs(3)))
72 allocate(vv_xg%v4(ids:ide,jds:jde,mzs(4)))
73 allocate(vv_xg%v5(ids:ide,jds:jde,mzs(5)))
75 allocate(vv_xg%v6(ids:ide,jds:jde,mzs(6)))
76 allocate(vv_xg%v7(ids:ide,jds:jde,mzs(7)))
77 allocate(vv_xg%v8(ids:ide,jds:jde,mzs(8)))
78 allocate(vv_xg%v9(ids:ide,jds:jde,mzs(9)))
79 allocate(vv_xg%v10(ids:ide,jds:jde,mzs(10)))
81 if ( nmzs == 8 .or. nmzs == 13 ) then
82 allocate(vv_xg%v11(ids:ide,jds:jde,mzs(nmzs-2)))
84 ! the 4th dimension of vv_xg%alpha is the number of ensembles
85 allocate(vv_xg%alpha(ids_int:ide_int,jds_int:jde_int,kds_int:kde_int,mzs(nmzs)))
87 ! Allocate dummy array for non-monitor process to keep Fortran
! (1x1x1 placeholders so the arguments passed to the gather calls below
! are allocated on every task, not just the monitor.)
89 allocate(vv_xg%v1(1,1,1))
90 allocate(vv_xg%v2(1,1,1))
91 allocate(vv_xg%v3(1,1,1))
92 allocate(vv_xg%v4(1,1,1))
93 allocate(vv_xg%v5(1,1,1))
95 allocate(vv_xg%v6(1,1,1))
96 allocate(vv_xg%v7(1,1,1))
97 allocate(vv_xg%v8(1,1,1))
98 allocate(vv_xg%v9(1,1,1))
99 allocate(vv_xg%v10(1,1,1))
101 if ( nmzs == 8 .or. nmzs == 13 ) then
102 allocate(vv_xg%v11(1,1,1))
104 allocate(vv_xg%alpha(1,1,1,1))
107 ! TOdo: encapsulate this crap!
108 ! gather to global data structures
109 ! it is possible to gather straight into a globally-sized cv-array
110 ! TOdo: add this optimization later
! Gather each field from the per-task patches to the domain-sized arrays.
111 call da_patch_to_global(grid, vv_x%v1, vv_xg%v1, mzs(1))
112 call da_patch_to_global(grid, vv_x%v2, vv_xg%v2, mzs(2))
113 call da_patch_to_global(grid, vv_x%v3, vv_xg%v3, mzs(3))
114 call da_patch_to_global(grid, vv_x%v4, vv_xg%v4, mzs(4))
115 call da_patch_to_global(grid, vv_x%v5, vv_xg%v5, mzs(5))
! v6-v10 are gathered only when the variable count exceeds 10 — presumably
! the configurations in which those slots carry data; confirm with callers.
116 if ( nmzs > 10 ) then
117 call da_patch_to_global(grid, vv_x%v6, vv_xg%v6, mzs(6))
118 call da_patch_to_global(grid, vv_x%v7, vv_xg%v7, mzs(7))
119 call da_patch_to_global(grid, vv_x%v8, vv_xg%v8, mzs(8))
120 call da_patch_to_global(grid, vv_x%v9, vv_xg%v9, mzs(9))
121 call da_patch_to_global(grid, vv_x%v10, vv_xg%v10, mzs(10))
123 if ( nmzs == 8 .or. nmzs == 13 ) then
124 call da_patch_to_global(grid, vv_x%v11, vv_xg%v11, mzs(nmzs-2))
! Alpha (ensemble) control variables: gather one member at a time.
! mzs(nmzs) is the ensemble size; mzs(nmzs-1) the alpha vertical levels.
126 if ( mzs(nmzs) > 0 ) then
127 do n = 1, mzs(nmzs) ! Ensemble size
! Dual-resolution hybrid gathers on the intermediate grid instead.
128 if ( anal_type_hybrid_dual_res ) then
129 call da_patch_to_global_dual_res(grid%intermediate_grid, vv_x%alpha(:,:,:,n), vv_xg%alpha(:,:,:,n), mzs(nmzs-1))
131 call da_patch_to_global(grid, vv_x%alpha(:,:,:,n), vv_xg%alpha(:,:,:,n), mzs(nmzs-1))
! Local work fields are no longer needed once gathered.
137 deallocate (vv_x%v1, vv_x%v2, vv_x%v3, vv_x%v4, vv_x%v5, vv_x%alpha)
138 if ( nmzs > 10 ) then
139 deallocate (vv_x%v6, vv_x%v7, vv_x%v8, vv_x%v9, vv_x%v10)
141 if ( nmzs == 8 .or. nmzs == 13 ) then
142 deallocate (vv_x%v11)
145 ! finally, collapse data back into a globally-sized cv-array
! On the monitor the "patch" is the whole domain, so every xpose bound
! (domain, memory, tile) is set to the domain bounds before repacking.
146 xpg%ids = ids; xpg%ide = ide
147 xpg%ims = ids; xpg%ime = ide
148 xpg%its = ids; xpg%ite = ide
149 xpg%jds = jds; xpg%jde = jde
150 xpg%jms = jds; xpg%jme = jde
151 xpg%jts = jds; xpg%jte = jde
152 xpg%kds = kds; xpg%kde = kde
153 xpg%kms = kds; xpg%kme = kde
154 xpg%kts = kds; xpg%kte = kde
! Repack the gathered fields into the global cv vector (inverse of
! da_cv_to_vv above).
155 call da_vv_to_cv(vv_xg, xpg, mzs, nmzs, cv_size_global, xg)
158 deallocate (vv_xg%v1, vv_xg%v2, vv_xg%v3, vv_xg%v4, vv_xg%v5, vv_xg%alpha)
159 if ( nmzs > 10 ) then
160 deallocate (vv_xg%v6, vv_xg%v7, vv_xg%v8, vv_xg%v9, vv_xg%v10)
162 if ( nmzs == 8 .or. nmzs == 13 ) then
163 deallocate (vv_xg%v11)
166 if (trace_use) call da_trace_exit("da_cv_to_global")
170 end subroutine da_cv_to_global