; RUN: opt -S -mtriple=amdgcn- -passes=sroa %s -o %t.sroa.ll
; RUN: llc -mtriple=amdgcn-- -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %t.sroa.ll | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-ALLOCA %s
; RUN: llc -mtriple=amdgcn-- -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %t.sroa.ll | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-PROMOTE %s
; RUN: opt -S -mtriple=amdgcn-- -passes='sroa,amdgpu-promote-alloca,instcombine' < %s | FileCheck -check-prefix=OPT %s

target datalayout = "A5"
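
; Checks that a dynamically indexed read of a constant-initialized [4 x i32]
; alloca is promoted to an extractelement of the constant vector
; <0, 1, 2, 3>: the promoted path uses no scratch, while the -promote-alloca
; path falls back to buffer stores and a buffer load.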
; OPT-LABEL: @vector_read_alloca_bitcast(
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: store i32 %0, ptr addrspace(1) %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_read_alloca_bitcast(ptr addrspace(1) %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %y = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 3
  store i32 0, ptr addrspace(5) %tmp
  store i32 1, ptr addrspace(5) %y
  store i32 2, ptr addrspace(5) %z
  store i32 3, ptr addrspace(5) %w
  %tmp1 = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 %index
  %tmp2 = load i32, ptr addrspace(5) %tmp1
  store i32 %tmp2, ptr addrspace(1) %out
  ret void
}
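
; Checks that a write to one dynamic index followed by a read from another
; dynamic index of the same alloca is promoted to an insertelement plus an
; extractelement on a <4 x i32> vector, with no scratch used.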
; OPT-LABEL: @vector_write_alloca_bitcast(
; OPT: %0 = insertelement <4 x i32> zeroinitializer, i32 1, i32 %w_index
; OPT-NEXT: %1 = extractelement <4 x i32> %0, i32 %r_index
; OPT-NEXT: store i32 %1, ptr addrspace(1) %out, align 4

; GCN-LABEL: {{^}}vector_write_alloca_bitcast:
; GCN-ALLOCA-COUNT-5: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE-COUNT-7: v_cndmask

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_write_alloca_bitcast(ptr addrspace(1) %out, i32 %w_index, i32 %r_index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %y = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 3
  store i32 0, ptr addrspace(5) %tmp
  store i32 0, ptr addrspace(5) %y
  store i32 0, ptr addrspace(5) %z
  store i32 0, ptr addrspace(5) %w
  %tmp1 = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 %w_index
  store i32 1, ptr addrspace(5) %tmp1
  %tmp2 = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 %r_index
  %tmp3 = load i32, ptr addrspace(5) %tmp2
  store i32 %tmp3, ptr addrspace(1) %out
  ret void
}
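
; Checks that a [6 x float] alloca written with i32 values in a loop is
; promoted to a <6 x float> vector: the stored integers are inserted at a
; variable index, and the readback loop goes through a bitcast to <6 x i32>.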
; OPT-LABEL: @vector_write_read_bitcast_to_float(
; OPT: %promotealloca = phi <6 x float> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x float> %promotealloca, float %tmp71, i32 %tmp10

; OPT: %bc = bitcast <6 x float> %0 to <6 x i32>
; OPT: %1 = extractelement <6 x i32> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_float:
; GCN-ALLOCA: buffer_store_dword

; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask

; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_write_read_bitcast_to_float(ptr addrspace(1) %arg) {
bb:
  %tmp = alloca [6 x float], align 4, addrspace(5)
  call void @llvm.lifetime.start.p5(i64 24, ptr addrspace(5) %tmp) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp4
  %tmp7 = load i32, ptr addrspace(1) %tmp5, align 4
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x float], ptr addrspace(5) %tmp, i32 0, i32 %tmp10
  store i32 %tmp7, ptr addrspace(5) %tmp11, align 4
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5(i64 24, ptr addrspace(5) %tmp) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x float], ptr addrspace(5) %tmp, i32 0, i32 %tmp20
  %tmp23 = load i32, ptr addrspace(5) %tmp21, align 4
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp24
  store i32 %tmp23, ptr addrspace(1) %tmp25, align 4
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}
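
; Same pattern as above with [6 x double] written as i64 values: the promoted
; <6 x double> vector is bitcast to <6 x i64> on the read side, and each
; 64-bit access expands to two 32-bit scratch or movrel operations.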
; OPT-LABEL: @vector_write_read_bitcast_to_double(
; OPT: %promotealloca = phi <6 x double> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x double> %promotealloca, double %tmp71, i32 %tmp10

; OPT: %bc = bitcast <6 x double> %0 to <6 x i64>
; OPT: %1 = extractelement <6 x i64> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_double:
; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_write_read_bitcast_to_double(ptr addrspace(1) %arg) {
bb:
  %tmp = alloca [6 x double], align 8, addrspace(5)
  call void @llvm.lifetime.start.p5(i64 48, ptr addrspace(5) %tmp) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds double, ptr addrspace(1) %arg, i64 %tmp4
  %tmp7 = load i64, ptr addrspace(1) %tmp5, align 8
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x double], ptr addrspace(5) %tmp, i32 0, i32 %tmp10
  store i64 %tmp7, ptr addrspace(5) %tmp11, align 8
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5(i64 48, ptr addrspace(5) %tmp) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x double], ptr addrspace(5) %tmp, i32 0, i32 %tmp20
  %tmp23 = load i64, ptr addrspace(5) %tmp21, align 8
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds double, ptr addrspace(1) %arg, i64 %tmp24
  store i64 %tmp23, ptr addrspace(1) %tmp25, align 8
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}
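
; Same pattern with a [6 x i64] alloca; the stored and loaded types already
; match the promoted <6 x i64> vector, so no bitcast is needed.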
; OPT-LABEL: @vector_write_read_bitcast_to_i64(
; OPT: %promotealloca = phi <6 x i64> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x i64> %promotealloca, i64 %tmp6, i32 %tmp9

; OPT: %1 = extractelement <6 x i64> %0, i32 %tmp18

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_i64:
; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_write_read_bitcast_to_i64(ptr addrspace(1) %arg) {
bb:
  %tmp = alloca [6 x i64], align 8, addrspace(5)
  call void @llvm.lifetime.start.p5(i64 48, ptr addrspace(5) %tmp) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp11, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 %tmp4
  %tmp6 = load i64, ptr addrspace(1) %tmp5, align 8
  %tmp7 = trunc i32 %tmp3 to i16
  %tmp8 = urem i16 %tmp7, 6
  %tmp9 = zext i16 %tmp8 to i32
  %tmp10 = getelementptr inbounds [6 x i64], ptr addrspace(5) %tmp, i32 0, i32 %tmp9
  store i64 %tmp6, ptr addrspace(5) %tmp10, align 8
  %tmp11 = add nuw nsw i32 %tmp3, 1
  %tmp12 = icmp eq i32 %tmp11, 1000
  br i1 %tmp12, label %.preheader, label %bb2

bb13:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5(i64 48, ptr addrspace(5) %tmp) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp14 = phi i32 [ %tmp23, %.preheader ], [ 0, %bb2 ]
  %tmp15 = trunc i32 %tmp14 to i16
  %tmp16 = urem i16 %tmp15, 6
  %tmp17 = sub nuw nsw i16 5, %tmp16
  %tmp18 = zext i16 %tmp17 to i32
  %tmp19 = getelementptr inbounds [6 x i64], ptr addrspace(5) %tmp, i32 0, i32 %tmp18
  %tmp20 = load i64, ptr addrspace(5) %tmp19, align 8
  %tmp21 = zext i32 %tmp14 to i64
  %tmp22 = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 %tmp21
  store i64 %tmp20, ptr addrspace(1) %tmp22, align 8
  %tmp23 = add nuw nsw i32 %tmp14, 1
  %tmp24 = icmp eq i32 %tmp23, 1000
  br i1 %tmp24, label %bb13, label %.preheader
}

; TODO: llvm.assume can be ignored.
; OPT-LABEL: @vector_read_alloca_bitcast_assume(
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT: store i32 %0, ptr addrspace(1) %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast_assume:
; GCN-COUNT: buffer_store_dword
define amdgpu_kernel void @vector_read_alloca_bitcast_assume(ptr addrspace(1) %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %cmp = icmp ne ptr addrspace(5) %tmp, null
  call void @llvm.assume(i1 %cmp)
  %y = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 3
  store i32 0, ptr addrspace(5) %tmp
  store i32 1, ptr addrspace(5) %y
  store i32 2, ptr addrspace(5) %z
  store i32 3, ptr addrspace(5) %w
  %tmp1 = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 %index
  %tmp2 = load i32, ptr addrspace(5) %tmp1
  store i32 %tmp2, ptr addrspace(1) %out
  ret void
}
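
; Checks that promotion still applies when the alloca has several readers: the
; dynamic extract is combined with the known constant elements 0 and 1 into a
; single add of 1.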
; OPT-LABEL: @vector_read_alloca_multiuse(
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: %add2 = add nuw nsw i32 %0, 1
; OPT-NEXT: store i32 %add2, ptr addrspace(1) %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_multiuse:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc

; GCN-PROMOTE: ScratchSize: 0
define amdgpu_kernel void @vector_read_alloca_multiuse(ptr addrspace(1) %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %y = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 3
  store i32 0, ptr addrspace(5) %tmp
  store i32 1, ptr addrspace(5) %y
  store i32 2, ptr addrspace(5) %z
  store i32 3, ptr addrspace(5) %w
  %tmp1 = getelementptr [4 x i32], ptr addrspace(5) %tmp, i32 0, i32 %index
  %tmp2 = load i32, ptr addrspace(5) %tmp1
  %tmp3 = load i32, ptr addrspace(5) %tmp
  %tmp4 = load i32, ptr addrspace(5) %y
  %add1 = add i32 %tmp2, %tmp3
  %add2 = add i32 %add1, %tmp4
  store i32 %add2, ptr addrspace(1) %out
  ret void
}
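
; Checks that a <4 x i32> store and load through a <4 x float> alloca folds
; away completely: the constant vector is stored straight to %out and no
; scratch is allocated.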
; OPT-LABEL: @bitcast_vector_to_vector(
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(1) %out, align 16

; GCN-LABEL: {{^}}bitcast_vector_to_vector:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0
define amdgpu_kernel void @bitcast_vector_to_vector(ptr addrspace(1) %out) {
entry:
  %alloca = alloca <4 x float>, align 16, addrspace(5)
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %alloca
  %load = load <4 x i32>, ptr addrspace(5) %alloca, align 16
  store <4 x i32> %load, ptr addrspace(1) %out
  ret void
}
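
; Same as above, but the <4 x i32> store and load go through a [4 x float]
; array alloca instead of a vector alloca.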
; OPT-LABEL: @vector_bitcast_from_alloca_array(
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(1) %out, align 16

; GCN-LABEL: {{^}}vector_bitcast_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0
define amdgpu_kernel void @vector_bitcast_from_alloca_array(ptr addrspace(1) %out) {
entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %alloca
  %load = load <4 x i32>, ptr addrspace(5) %alloca, align 16
  store <4 x i32> %load, ptr addrspace(1) %out
  ret void
}
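
; Checks a [4 x i32] aggregate store and load through a [4 x float] alloca;
; the aggregate copy to %out is split into four scalar stores.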
; OPT-LABEL: @vector_bitcast_to_array_from_alloca_array(
; OPT-NEXT: store i32 1, ptr addrspace(1) %out, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds [4 x i32], ptr addrspace(1) %out, i64 0, i64 1
; OPT-NEXT: store i32 2, ptr addrspace(1) %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds [4 x i32], ptr addrspace(1) %out, i64 0, i64 2
; OPT-NEXT: store i32 3, ptr addrspace(1) %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds [4 x i32], ptr addrspace(1) %out, i64 0, i64 3
; OPT-NEXT: store i32 4, ptr addrspace(1) %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_array_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0
define amdgpu_kernel void @vector_bitcast_to_array_from_alloca_array(ptr addrspace(1) %out) {
  %alloca = alloca [4 x float], align 16, addrspace(5)
  store [4 x i32] [i32 1, i32 2, i32 3, i32 4], ptr addrspace(5) %alloca
  %load = load [4 x i32], ptr addrspace(5) %alloca, align 16
  store [4 x i32] %load, ptr addrspace(1) %out
  ret void
}
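
; Same as above with a %struct.v4 aggregate instead of a [4 x i32] array.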
; OPT-LABEL: @vector_bitcast_to_struct_from_alloca_array(
; OPT-NEXT: store i32 1, ptr addrspace(1) %out, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds %struct.v4, ptr addrspace(1) %out, i64 0, i32 1
; OPT-NEXT: store i32 2, ptr addrspace(1) %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds %struct.v4, ptr addrspace(1) %out, i64 0, i32 2
; OPT-NEXT: store i32 3, ptr addrspace(1) %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds %struct.v4, ptr addrspace(1) %out, i64 0, i32 3
; OPT-NEXT: store i32 4, ptr addrspace(1) %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_struct_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

%struct.v4 = type { i32, i32, i32, i32 }
define amdgpu_kernel void @vector_bitcast_to_struct_from_alloca_array(ptr addrspace(1) %out) {
  %alloca = alloca [4 x float], align 16, addrspace(5)
  store %struct.v4 { i32 1, i32 2, i32 3, i32 4 }, ptr addrspace(5) %alloca
  %load = load %struct.v4, ptr addrspace(5) %alloca, align 16
  store %struct.v4 %load, ptr addrspace(1) %out
  ret void
}
declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture)
declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture)
declare void @llvm.assume(i1)