1 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-ALLOCA %s
2 ; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-PROMOTE %s
3 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-promote-alloca -sroa -instcombine < %s | FileCheck -check-prefix=OPT %s
5 target datalayout = "A5"
7 ; OPT-LABEL: @vector_read_alloca_bitcast(
9 ; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
10 ; OPT-NEXT: store i32 %0, i32 addrspace(1)* %out, align 4
12 ; GCN-LABEL: {{^}}vector_read_alloca_bitcast:
13 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
14 ; GCN-ALLOCA: buffer_load_dword
16 ; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
17 ; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
18 ; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
19 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
20 ; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
21 ; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
22 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
23 ; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
24 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
25 ; GCN-PROMOTE: ScratchSize: 0
27 define amdgpu_kernel void @vector_read_alloca_bitcast(i32 addrspace(1)* %out, i32 %index) {
; Fill all four slots of a [4 x i32] alloca with the constants 0..3
; (slot 0 through a bitcast, slots 1-3 through GEPs), then load one slot
; at the dynamic %index and store it to %out.  Per the OPT checks above,
; promote-alloca + SROA should fold the whole sequence into a single
; extractelement from the constant vector <0, 1, 2, 3>.
29 %tmp = alloca [4 x i32], addrspace(5)
30 %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
31 %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
32 %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
33 %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
34 store i32 0, i32 addrspace(5)* %x
35 store i32 1, i32 addrspace(5)* %y
36 store i32 2, i32 addrspace(5)* %z
37 store i32 3, i32 addrspace(5)* %w
; Dynamically indexed read back from the alloca.
38 %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
39 %tmp2 = load i32, i32 addrspace(5)* %tmp1
40 store i32 %tmp2, i32 addrspace(1)* %out
44 ; OPT-LABEL: @vector_write_alloca_bitcast(
46 ; OPT: %0 = insertelement <4 x i32> zeroinitializer, i32 1, i32 %w_index
47 ; OPT-NEXT: %1 = extractelement <4 x i32> %0, i32 %r_index
48 ; OPT-NEXT: store i32 %1, i32 addrspace(1)* %out, align
50 ; GCN-LABEL: {{^}}vector_write_alloca_bitcast:
51 ; GCN-ALLOCA-COUNT-5: buffer_store_dword
52 ; GCN-ALLOCA: buffer_load_dword
54 ; GCN-PROMOTE-COUNT-7: v_cndmask
56 ; GCN-PROMOTE: ScratchSize: 0
58 define amdgpu_kernel void @vector_write_alloca_bitcast(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
; Zero-initialize a [4 x i32] alloca, store 1 at the dynamic %w_index,
; then read back at the (independent) dynamic %r_index and store to %out.
; Per the OPT checks above, this should become an insertelement into a
; zero vector followed by an extractelement.
60 %tmp = alloca [4 x i32], addrspace(5)
61 %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
62 %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
63 %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
64 %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
65 store i32 0, i32 addrspace(5)* %x
66 store i32 0, i32 addrspace(5)* %y
67 store i32 0, i32 addrspace(5)* %z
68 store i32 0, i32 addrspace(5)* %w
; Dynamic write, then dynamic read.
69 %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %w_index
70 store i32 1, i32 addrspace(5)* %tmp1
71 %tmp2 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %r_index
72 %tmp3 = load i32, i32 addrspace(5)* %tmp2
73 store i32 %tmp3, i32 addrspace(1)* %out
77 ; OPT-LABEL: @vector_write_read_bitcast_to_float(
80 ; OPT: %tmp.sroa.0.0 = phi <6 x float> [ undef, %bb ], [ %0, %bb2 ]
81 ; OPT: %0 = insertelement <6 x float> %tmp.sroa.0.0, float %tmp73, i32 %tmp10
83 ; OPT: %bc = bitcast <6 x float> %0 to <6 x i32>
84 ; OPT: %1 = extractelement <6 x i32> %bc, i32 %tmp20
86 ; GCN-LABEL: {{^}}vector_write_read_bitcast_to_float:
87 ; GCN-ALLOCA: buffer_store_dword
89 ; GCN-PROMOTE-COUNT-6: v_cmp_eq_u16
90 ; GCN-PROMOTE-COUNT-6: v_cndmask
94 ; GCN-ALLOCA: buffer_load_dword
96 ; GCN-PROMOTE: v_cmp_eq_u16
97 ; GCN-PROMOTE: v_cndmask
98 ; GCN-PROMOTE: v_cmp_eq_u16
99 ; GCN-PROMOTE: v_cndmask
100 ; GCN-PROMOTE: v_cmp_eq_u16
101 ; GCN-PROMOTE: v_cndmask
102 ; GCN-PROMOTE: v_cmp_eq_u16
103 ; GCN-PROMOTE: v_cndmask
104 ; GCN-PROMOTE: v_cmp_eq_u16
105 ; GCN-PROMOTE: v_cndmask
107 ; GCN-PROMOTE: ScratchSize: 0
109 define amdgpu_kernel void @vector_write_read_bitcast_to_float(float addrspace(1)* %arg) {
; Two back-to-back loops over a [6 x float] alloca accessed through
; float<->i32 bitcasts.  Per the OPT checks above, SROA should turn the
; alloca into a <6 x float> value threaded through a phi, with
; insertelement for the writes and a bitcast + extractelement for the
; reads, leaving ScratchSize 0 in the promote-alloca codegen run.
111 %tmp = alloca [6 x float], align 4, addrspace(5)
112 %tmp1 = bitcast [6 x float] addrspace(5)* %tmp to i8 addrspace(5)*
113 call void @llvm.lifetime.start.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
; Write loop: for i in 0..999, store arg[i] (reinterpreted as i32) into
; alloca slot i %% 6.
116 bb2: ; preds = %bb2, %bb
117 %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
118 %tmp4 = zext i32 %tmp3 to i64
119 %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp4
120 %tmp6 = bitcast float addrspace(1)* %tmp5 to i32 addrspace(1)*
121 %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
122 %tmp8 = trunc i32 %tmp3 to i16
123 %tmp9 = urem i16 %tmp8, 6
124 %tmp10 = zext i16 %tmp9 to i32
125 %tmp11 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp10
126 %tmp12 = bitcast float addrspace(5)* %tmp11 to i32 addrspace(5)*
127 store i32 %tmp7, i32 addrspace(5)* %tmp12, align 4
128 %tmp13 = add nuw nsw i32 %tmp3, 1
129 %tmp14 = icmp eq i32 %tmp13, 1000
130 br i1 %tmp14, label %.preheader, label %bb2
; Exit block: close the alloca's lifetime after the read loop finishes.
132 bb15: ; preds = %.preheader
133 call void @llvm.lifetime.end.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
; Read loop: for i in 0..999, load alloca slot (5 - i %% 6) and store it
; back to arg[i], again through an i32 view of the float data.
136 .preheader: ; preds = %.preheader, %bb2
137 %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
138 %tmp17 = trunc i32 %tmp16 to i16
139 %tmp18 = urem i16 %tmp17, 6
140 %tmp19 = sub nuw nsw i16 5, %tmp18
141 %tmp20 = zext i16 %tmp19 to i32
142 %tmp21 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp20
143 %tmp22 = bitcast float addrspace(5)* %tmp21 to i32 addrspace(5)*
144 %tmp23 = load i32, i32 addrspace(5)* %tmp22, align 4
145 %tmp24 = zext i32 %tmp16 to i64
146 %tmp25 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp24
147 %tmp26 = bitcast float addrspace(1)* %tmp25 to i32 addrspace(1)*
148 store i32 %tmp23, i32 addrspace(1)* %tmp26, align 4
149 %tmp27 = add nuw nsw i32 %tmp16, 1
150 %tmp28 = icmp eq i32 %tmp27, 1000
151 br i1 %tmp28, label %bb15, label %.preheader
154 ; OPT-LABEL: @vector_write_read_bitcast_to_double(
157 ; OPT: %tmp.sroa.0.0 = phi <6 x double> [ undef, %bb ], [ %0, %bb2 ]
158 ; OPT: %0 = insertelement <6 x double> %tmp.sroa.0.0, double %tmp73, i32 %tmp10
160 ; OPT: %bc = bitcast <6 x double> %0 to <6 x i64>
161 ; OPT: %1 = extractelement <6 x i64> %bc, i32 %tmp20
163 ; GCN-LABEL: {{^}}vector_write_read_bitcast_to_double:
165 ; GCN-ALLOCA-COUNT-2: buffer_store_dword
166 ; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32
170 ; GCN-ALLOCA-COUNT-2: buffer_load_dword
171 ; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32
173 ; GCN-PROMOTE: ScratchSize: 0
175 define amdgpu_kernel void @vector_write_read_bitcast_to_double(double addrspace(1)* %arg) {
; Same write-then-read loop pattern as @vector_write_read_bitcast_to_float,
; but on a [6 x double] alloca viewed as i64.  Per the OPT checks above,
; this should promote to a <6 x double> phi with insert/extractelement;
; each 64-bit lane costs two v_movreld/movrels in the GCN-PROMOTE run.
177 %tmp = alloca [6 x double], align 8, addrspace(5)
178 %tmp1 = bitcast [6 x double] addrspace(5)* %tmp to i8 addrspace(5)*
179 call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
; Write loop: store arg[i] (as i64) into alloca slot i %% 6, i = 0..999.
182 bb2: ; preds = %bb2, %bb
183 %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
184 %tmp4 = zext i32 %tmp3 to i64
185 %tmp5 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp4
186 %tmp6 = bitcast double addrspace(1)* %tmp5 to i64 addrspace(1)*
187 %tmp7 = load i64, i64 addrspace(1)* %tmp6, align 8
188 %tmp8 = trunc i32 %tmp3 to i16
189 %tmp9 = urem i16 %tmp8, 6
190 %tmp10 = zext i16 %tmp9 to i32
191 %tmp11 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp10
192 %tmp12 = bitcast double addrspace(5)* %tmp11 to i64 addrspace(5)*
193 store i64 %tmp7, i64 addrspace(5)* %tmp12, align 8
194 %tmp13 = add nuw nsw i32 %tmp3, 1
195 %tmp14 = icmp eq i32 %tmp13, 1000
196 br i1 %tmp14, label %.preheader, label %bb2
; Exit block: end the alloca's lifetime once the read loop is done.
198 bb15: ; preds = %.preheader
199 call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
; Read loop: load alloca slot (5 - i %% 6) and store it to arg[i].
202 .preheader: ; preds = %.preheader, %bb2
203 %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
204 %tmp17 = trunc i32 %tmp16 to i16
205 %tmp18 = urem i16 %tmp17, 6
206 %tmp19 = sub nuw nsw i16 5, %tmp18
207 %tmp20 = zext i16 %tmp19 to i32
208 %tmp21 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp20
209 %tmp22 = bitcast double addrspace(5)* %tmp21 to i64 addrspace(5)*
210 %tmp23 = load i64, i64 addrspace(5)* %tmp22, align 8
211 %tmp24 = zext i32 %tmp16 to i64
212 %tmp25 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp24
213 %tmp26 = bitcast double addrspace(1)* %tmp25 to i64 addrspace(1)*
214 store i64 %tmp23, i64 addrspace(1)* %tmp26, align 8
215 %tmp27 = add nuw nsw i32 %tmp16, 1
216 %tmp28 = icmp eq i32 %tmp27, 1000
217 br i1 %tmp28, label %bb15, label %.preheader
220 ; OPT-LABEL: @vector_write_read_bitcast_to_i64(
223 ; OPT: %tmp.sroa.0.0 = phi <6 x i64> [ undef, %bb ], [ %0, %bb2 ]
224 ; OPT: %0 = insertelement <6 x i64> %tmp.sroa.0.0, i64 %tmp6, i32 %tmp9
226 ; OPT: %1 = extractelement <6 x i64> %0, i32 %tmp18
228 ; GCN-LABEL: {{^}}vector_write_read_bitcast_to_i64:
230 ; GCN-ALLOCA-COUNT-2: buffer_store_dword
231 ; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32
235 ; GCN-ALLOCA-COUNT-2: buffer_load_dword
236 ; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32
238 ; GCN-PROMOTE: ScratchSize: 0
240 define amdgpu_kernel void @vector_write_read_bitcast_to_i64(i64 addrspace(1)* %arg) {
; Same write-then-read loop pattern on a [6 x i64] alloca, with no
; element-type bitcasts at all (native i64 accesses).  Per the OPT checks
; above, this should promote to a <6 x i64> phi with insert/extractelement.
242 %tmp = alloca [6 x i64], align 8, addrspace(5)
243 %tmp1 = bitcast [6 x i64] addrspace(5)* %tmp to i8 addrspace(5)*
244 call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
; Write loop: store arg[i] into alloca slot i %% 6, i = 0..999.
247 bb2: ; preds = %bb2, %bb
248 %tmp3 = phi i32 [ 0, %bb ], [ %tmp11, %bb2 ]
249 %tmp4 = zext i32 %tmp3 to i64
250 %tmp5 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp4
251 %tmp6 = load i64, i64 addrspace(1)* %tmp5, align 8
252 %tmp7 = trunc i32 %tmp3 to i16
253 %tmp8 = urem i16 %tmp7, 6
254 %tmp9 = zext i16 %tmp8 to i32
255 %tmp10 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp9
256 store i64 %tmp6, i64 addrspace(5)* %tmp10, align 8
257 %tmp11 = add nuw nsw i32 %tmp3, 1
258 %tmp12 = icmp eq i32 %tmp11, 1000
259 br i1 %tmp12, label %.preheader, label %bb2
; Exit block: end the alloca's lifetime once the read loop is done.
261 bb13: ; preds = %.preheader
262 call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
; Read loop: load alloca slot (5 - i %% 6) and store it to arg[i].
265 .preheader: ; preds = %.preheader, %bb2
266 %tmp14 = phi i32 [ %tmp23, %.preheader ], [ 0, %bb2 ]
267 %tmp15 = trunc i32 %tmp14 to i16
268 %tmp16 = urem i16 %tmp15, 6
269 %tmp17 = sub nuw nsw i16 5, %tmp16
270 %tmp18 = zext i16 %tmp17 to i32
271 %tmp19 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp18
272 %tmp20 = load i64, i64 addrspace(5)* %tmp19, align 8
273 %tmp21 = zext i32 %tmp14 to i64
274 %tmp22 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp21
275 store i64 %tmp20, i64 addrspace(1)* %tmp22, align 8
276 %tmp23 = add nuw nsw i32 %tmp14, 1
277 %tmp24 = icmp eq i32 %tmp23, 1000
278 br i1 %tmp24, label %bb13, label %.preheader
281 ; TODO: llvm.assume can be ignored
283 ; OPT-LABEL: @vector_read_alloca_bitcast_assume(
284 ; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
285 ; OPT: store i32 %0, i32 addrspace(1)* %out, align 4
287 ; GCN-LABEL: {{^}}vector_read_alloca_bitcast_assume:
288 ; GCN-COUNT-4: buffer_store_dword
290 define amdgpu_kernel void @vector_read_alloca_bitcast_assume(i32 addrspace(1)* %out, i32 %index) {
; Same as @vector_read_alloca_bitcast, but with an llvm.assume user of the
; bitcasted alloca pointer in between.  Per the OPT checks above, the
; assume does not block vectorization: the read still folds to an
; extractelement from <0, 1, 2, 3>.  (See the TODO above this function.)
292 %tmp = alloca [4 x i32], addrspace(5)
293 %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
; Extra pointer user: assume the alloca slot address is non-null.
294 %cmp = icmp ne i32 addrspace(5)* %x, null
295 call void @llvm.assume(i1 %cmp)
296 %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
297 %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
298 %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
299 store i32 0, i32 addrspace(5)* %x
300 store i32 1, i32 addrspace(5)* %y
301 store i32 2, i32 addrspace(5)* %z
302 store i32 3, i32 addrspace(5)* %w
303 %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
304 %tmp2 = load i32, i32 addrspace(5)* %tmp1
305 store i32 %tmp2, i32 addrspace(1)* %out
309 ; OPT-LABEL: @vector_read_alloca_multiuse(
311 ; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
312 ; OPT-NEXT: %add2 = add nuw nsw i32 %0, 1
313 ; OPT-NEXT: store i32 %add2, i32 addrspace(1)* %out, align 4
315 ; GCN-LABEL: {{^}}vector_read_alloca_multiuse:
316 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
317 ; GCN-ALLOCA: buffer_load_dword
319 ; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
320 ; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
321 ; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
322 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
323 ; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
324 ; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
325 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
326 ; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
327 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
329 ; GCN-PROMOTE: ScratchSize: 0
331 define amdgpu_kernel void @vector_read_alloca_multiuse(i32 addrspace(1)* %out, i32 %index) {
; Like @vector_read_alloca_bitcast, but slot 0 is reached through a chain
; of two bitcasts (array -> float -> i32) and the function also re-reads
; slots 0 and 1 and sums them with the dynamic element.  Per the OPT
; checks above, the constant reads (0 and 1) fold away, leaving a single
; extractelement plus "add 1".
333 %tmp = alloca [4 x i32], addrspace(5)
334 %b = bitcast [4 x i32] addrspace(5)* %tmp to float addrspace(5)*
335 %x = bitcast float addrspace(5)* %b to i32 addrspace(5)*
336 %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
337 %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
338 %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
339 store i32 0, i32 addrspace(5)* %x
340 store i32 1, i32 addrspace(5)* %y
341 store i32 2, i32 addrspace(5)* %z
342 store i32 3, i32 addrspace(5)* %w
; Dynamic read plus two constant-index reads feeding a sum.
343 %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
344 %tmp2 = load i32, i32 addrspace(5)* %tmp1
345 %tmp3 = load i32, i32 addrspace(5)* %x
346 %tmp4 = load i32, i32 addrspace(5)* %y
347 %add1 = add i32 %tmp2, %tmp3
348 %add2 = add i32 %add1, %tmp4
349 store i32 %add2, i32 addrspace(1)* %out
353 ; OPT-LABEL: @bitcast_vector_to_vector(
355 ; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16
357 ; GCN-LABEL: {{^}}bitcast_vector_to_vector:
358 ; GCN: v_mov_b32_e32 v0, 1
359 ; GCN: v_mov_b32_e32 v1, 2
360 ; GCN: v_mov_b32_e32 v2, 3
361 ; GCN: v_mov_b32_e32 v3, 4
363 ; GCN: ScratchSize: 0
365 define amdgpu_kernel void @bitcast_vector_to_vector(<4 x i32> addrspace(1)* %out) {
; Round-trip a constant <4 x i32> through a <4 x float> alloca viewed via
; bitcast.  Per the OPT check above, the whole thing folds to a direct
; store of <1, 2, 3, 4> to %out with no scratch usage.
367 %alloca = alloca <4 x float>, align 16, addrspace(5)
368 %cast = bitcast <4 x float> addrspace(5)* %alloca to <4 x i32> addrspace(5)*
369 store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
370 %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
371 store <4 x i32> %load, <4 x i32> addrspace(1)* %out
375 ; OPT-LABEL: @vector_bitcast_from_alloca_array(
377 ; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16
379 ; GCN-LABEL: {{^}}vector_bitcast_from_alloca_array:
380 ; GCN: v_mov_b32_e32 v0, 1
381 ; GCN: v_mov_b32_e32 v1, 2
382 ; GCN: v_mov_b32_e32 v2, 3
383 ; GCN: v_mov_b32_e32 v3, 4
385 ; GCN: ScratchSize: 0
387 define amdgpu_kernel void @vector_bitcast_from_alloca_array(<4 x i32> addrspace(1)* %out) {
; Same round-trip as @bitcast_vector_to_vector, but the alloca is an
; array type ([4 x float]) bitcast to a vector pointer.  Per the OPT
; check above, it still folds to a direct store of <1, 2, 3, 4>.
389 %alloca = alloca [4 x float], align 16, addrspace(5)
390 %cast = bitcast [4 x float] addrspace(5)* %alloca to <4 x i32> addrspace(5)*
391 store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
392 %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
393 store <4 x i32> %load, <4 x i32> addrspace(1)* %out
397 ; OPT-LABEL: @vector_bitcast_to_array_from_alloca_array(
399 ; OPT: %out.repack = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 0
400 ; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
401 ; OPT-NEXT: %out.repack1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 1
402 ; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
403 ; OPT-NEXT: %out.repack2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 2
404 ; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
405 ; OPT-NEXT: %out.repack3 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 3
406 ; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4
408 ; GCN-LABEL: {{^}}vector_bitcast_to_array_from_alloca_array:
409 ; GCN: v_mov_b32_e32 v0, 1
410 ; GCN: v_mov_b32_e32 v1, 2
411 ; GCN: v_mov_b32_e32 v2, 3
412 ; GCN: v_mov_b32_e32 v3, 4
414 ; GCN: ScratchSize: 0
416 define amdgpu_kernel void @vector_bitcast_to_array_from_alloca_array([4 x i32] addrspace(1)* %out) {
; Round-trip an aggregate [4 x i32] constant through a [4 x float] alloca
; bitcast to an array pointer.  Per the OPT checks above, this becomes
; four scalar stores of 1..4 to the repacked %out elements, scratch-free.
418 %alloca = alloca [4 x float], align 16, addrspace(5)
419 %cast = bitcast [4 x float] addrspace(5)* %alloca to [4 x i32] addrspace(5)*
420 store [4 x i32] [i32 1, i32 2, i32 3, i32 4], [4 x i32] addrspace(5)* %cast
421 %load = load [4 x i32], [4 x i32] addrspace(5)* %cast, align 16
422 store [4 x i32] %load, [4 x i32] addrspace(1)* %out
426 ; OPT-LABEL: @vector_bitcast_to_struct_from_alloca_array(
428 ; OPT: %out.repack = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 0
429 ; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
430 ; OPT-NEXT: %out.repack1 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 1
431 ; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
432 ; OPT-NEXT: %out.repack2 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 2
433 ; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
434 ; OPT-NEXT: %out.repack3 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 3
435 ; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4
437 ; GCN-LABEL: {{^}}vector_bitcast_to_struct_from_alloca_array:
438 ; GCN: v_mov_b32_e32 v0, 1
439 ; GCN: v_mov_b32_e32 v1, 2
440 ; GCN: v_mov_b32_e32 v2, 3
441 ; GCN: v_mov_b32_e32 v3, 4
443 ; GCN: ScratchSize: 0
445 %struct.v4 = type { i32, i32, i32, i32 }
447 define amdgpu_kernel void @vector_bitcast_to_struct_from_alloca_array(%struct.v4 addrspace(1)* %out) {
; Same as @vector_bitcast_to_array_from_alloca_array, but the destination
; view is a homogeneous four-i32 struct (%struct.v4).  Per the OPT checks
; above, this also lowers to four repacked scalar stores of 1..4.
449 %alloca = alloca [4 x float], align 16, addrspace(5)
450 %cast = bitcast [4 x float] addrspace(5)* %alloca to %struct.v4 addrspace(5)*
451 store %struct.v4 { i32 1, i32 2, i32 3, i32 4 }, %struct.v4 addrspace(5)* %cast
452 %load = load %struct.v4, %struct.v4 addrspace(5)* %cast, align 16
453 store %struct.v4 %load, %struct.v4 addrspace(1)* %out
457 declare void @llvm.lifetime.start.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
459 declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
461 declare void @llvm.assume(i1)