; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -march=amdgcn -mcpu=tahiti -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs -o - %s | FileCheck %s

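; Checks how the GlobalISel IRTranslator lowers incoming function arguments of
; various scalar and vector types into generic MIR for AMDGPU.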
define void @void_func_i1(i1 %arg0) #0 {
; CHECK-LABEL: name: void_func_i1
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s1), [[DEF]](p1) :: (store 1 into `i1 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store i1 %arg0, i1 addrspace(1)* undef
  ret void
}

define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 {
; CHECK-LABEL: name: void_func_i1_zeroext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = zext i1 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

define void @void_func_i1_signext(i1 signext %arg0) #0 {
; CHECK-LABEL: name: void_func_i1_signext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s1)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = sext i1 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

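; Note: a branch on a divergent i1 argument is lowered through the
; llvm.amdgcn.if / llvm.amdgcn.end.cf intrinsic pair, as the checks below show.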
define void @i1_arg_i1_use(i1 %arg) #0 {
; CHECK-LABEL: name: i1_arg_i1_use
; CHECK: bb.1.bb:
; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000)
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[TRUNC]], [[C]]
; CHECK: [[INT:%[0-9]+]]:_(s1), [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if), [[XOR]](s1)
; CHECK: G_BRCOND [[INT]](s1), %bb.2
; CHECK: G_BR %bb.3
; CHECK: bb.2.bb1:
; CHECK: successors: %bb.3(0x80000000)
; CHECK: G_STORE [[C1]](s32), [[DEF]](p1) :: (volatile store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: bb.3.bb2:
; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s64)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
bb:
  br i1 %arg, label %bb2, label %bb1

bb1:
  store volatile i32 0, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

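; Note: i8 and i16 arguments, like i1 above, arrive in a full 32-bit VGPR and
; are narrowed with G_TRUNC.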
define void @void_func_i8(i8 %arg0) #0 {
; CHECK-LABEL: name: void_func_i8
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s8), [[DEF]](p1) :: (store 1 into `i8 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store i8 %arg0, i8 addrspace(1)* undef
  ret void
}

define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
; CHECK-LABEL: name: void_func_i8_zeroext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = zext i8 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

define void @void_func_i8_signext(i8 signext %arg0) #0 {
; CHECK-LABEL: name: void_func_i8_signext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s8)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = sext i8 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

define void @void_func_i16(i16 %arg0) #0 {
; CHECK-LABEL: name: void_func_i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s16), [[DEF]](p1) :: (store 2 into `i16 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store i16 %arg0, i16 addrspace(1)* undef
  ret void
}

define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
; CHECK-LABEL: name: void_func_i16_zeroext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = zext i16 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

define void @void_func_i16_signext(i16 signext %arg0) #0 {
; CHECK-LABEL: name: void_func_i16_signext
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s16)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
; CHECK: G_STORE [[ADD]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  %ext = sext i16 %arg0 to i32
  %add = add i32 %ext, 12
  store i32 %add, i32 addrspace(1)* undef
  ret void
}

define void @void_func_i32(i32 %arg0) #0 {
; CHECK-LABEL: name: void_func_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[COPY]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store i32 %arg0, i32 addrspace(1)* undef
  ret void
}

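; Note: 64-bit arguments arrive split across two VGPRs and are reassembled
; with G_MERGE_VALUES.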
define void @void_func_i64(i64 %arg0) #0 {
; CHECK-LABEL: name: void_func_i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[MV]](s64), [[DEF]](p1) :: (store 8 into `i64 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
  store i64 %arg0, i64 addrspace(1)* undef
  ret void
}

define void @void_func_f16(half %arg0) #0 {
; CHECK-LABEL: name: void_func_f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s16), [[DEF]](p1) :: (store 2 into `half addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store half %arg0, half addrspace(1)* undef
  ret void
}

define void @void_func_f32(float %arg0) #0 {
; CHECK-LABEL: name: void_func_f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[COPY]](s32), [[DEF]](p1) :: (store 4 into `float addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
  store float %arg0, float addrspace(1)* undef
  ret void
}

define void @void_func_f64(double %arg0) #0 {
; CHECK-LABEL: name: void_func_f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[MV]](s64), [[DEF]](p1) :: (store 8 into `double addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
  store double %arg0, double addrspace(1)* undef
  ret void
}

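; Note: vector arguments are passed one 32-bit component per VGPR and rebuilt
; with G_BUILD_VECTOR (after G_MERGE_VALUES for 64-bit elements).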
define void @void_func_v2i32(<2 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[DEF]](p1) :: (store 8 into `<2 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
  store <2 x i32> %arg0, <2 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v3i32(<3 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<3 x s32>), [[DEF]](p1) :: (store 12 into `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
; CHECK: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
; CHECK: S_SETPC_B64_return [[COPY4]]
  store <3 x i32> %arg0, <3 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v4i32(<4 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[DEF]](p1) :: (store 16 into `<4 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
  store <4 x i32> %arg0, <4 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v5i32(<5 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v5i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<5 x s32>), [[DEF]](p1) :: (store 20 into `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
; CHECK: S_SETPC_B64_return [[COPY6]]
  store <5 x i32> %arg0, <5 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v8i32(<8 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s32>), [[DEF]](p1) :: (store 32 into `<8 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
  store <8 x i32> %arg0, <8 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v16i32(<16 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s32>), [[DEF]](p1) :: (store 64 into `<16 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
  store <16 x i32> %arg0, <16 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v32i32(<32 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v32i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
  store <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
  ret void
}

; 1 over register limit
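; Note: the 33rd element does not fit in the 32 argument VGPRs; it is passed
; on the stack and loaded back from %fixed-stack.0, as the checks below show.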
define void @void_func_v33i32(<33 x i32> %arg0) #0 {
; CHECK-LABEL: name: void_func_v33i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<33 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32), [[LOAD]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<33 x s32>), [[DEF]](p1) :: (store 132 into `<33 x i32> addrspace(1)* undef`, align 256, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
  store <33 x i32> %arg0, <33 x i32> addrspace(1)* undef
  ret void
}

define void @void_func_v2i64(<2 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<2 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
  store <2 x i64> %arg0, <2 x i64> addrspace(1)* undef
  ret void
}

define void @void_func_v3i64(<3 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<3 x s64>), [[DEF]](p1) :: (store 24 into `<3 x i64> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
; CHECK: S_SETPC_B64_return [[COPY7]]
  store <3 x i64> %arg0, <3 x i64> addrspace(1)* undef
  ret void
}

define void @void_func_v4i64(<4 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s64>), [[DEF]](p1) :: (store 32 into `<4 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
  store <4 x i64> %arg0, <4 x i64> addrspace(1)* undef
  ret void
}

define void @void_func_v5i64(<5 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v5i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<5 x s64>), [[DEF]](p1) :: (store 40 into `<5 x i64> addrspace(1)* undef`, align 64, addrspace 1)
; CHECK: [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY10]]
; CHECK: S_SETPC_B64_return [[COPY11]]
  store <5 x i64> %arg0, <5 x i64> addrspace(1)* undef
  ret void
}

define void @void_func_v8i64(<8 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY10]](s32), [[COPY11]](s32)
; CHECK: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
; CHECK: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s64>), [[DEF]](p1) :: (store 64 into `<8 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
  store <8 x i64> %arg0, <8 x i64> addrspace(1)* undef
  ret void
}

define void @void_func_v16i64(<16 x i64> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY10]](s32), [[COPY11]](s32)
; CHECK: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
; CHECK: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
; CHECK: [[MV9:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
; CHECK: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
; CHECK: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
; CHECK: [[MV12:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY24]](s32), [[COPY25]](s32)
; CHECK: [[MV13:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY26]](s32), [[COPY27]](s32)
; CHECK: [[MV14:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY28]](s32), [[COPY29]](s32)
; CHECK: [[MV15:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64), [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64), [[MV12]](s64), [[MV13]](s64), [[MV14]](s64), [[MV15]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s64>), [[DEF]](p1) :: (store 128 into `<16 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
  store <16 x i64> %arg0, <16 x i64> addrspace(1)* undef
  ret void
}

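; Note: vectors of 16-bit elements are passed one element per 32-bit VGPR;
; the <N x s32> built from the copies is then narrowed with G_TRUNC.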
define void @void_func_v2i16(<2 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<2 x s16>), [[DEF]](p1) :: (store 4 into `<2 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
  store <2 x i16> %arg0, <2 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v3i16(<3 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<3 x s16>) = G_TRUNC [[BUILD_VECTOR]](<3 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<3 x s16>), [[DEF]](p1) :: (store 6 into `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
; CHECK: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
; CHECK: S_SETPC_B64_return [[COPY4]]
  store <3 x i16> %arg0, <3 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v4i16(<4 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<4 x s16>), [[DEF]](p1) :: (store 8 into `<4 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
  store <4 x i16> %arg0, <4 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v5i16(<5 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v5i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<5 x s16>) = G_TRUNC [[BUILD_VECTOR]](<5 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<5 x s16>), [[DEF]](p1) :: (store 10 into `<5 x i16> addrspace(1)* undef`, align 16, addrspace 1)
; CHECK: [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
; CHECK: S_SETPC_B64_return [[COPY6]]
  store <5 x i16> %arg0, <5 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v8i16(<8 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<8 x s16>) = G_TRUNC [[BUILD_VECTOR]](<8 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<8 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
  store <8 x i16> %arg0, <8 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v16i16(<16 x i16> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<16 x s16>) = G_TRUNC [[BUILD_VECTOR]](<16 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<16 x s16>), [[DEF]](p1) :: (store 32 into `<16 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
  store <16 x i16> %arg0, <16 x i16> addrspace(1)* undef
  ret void
}

define void @void_func_v2f32(<2 x float> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[DEF]](p1) :: (store 8 into `<2 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
  store <2 x float> %arg0, <2 x float> addrspace(1)* undef
  ret void
}

define void @void_func_v3f32(<3 x float> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<3 x s32>), [[DEF]](p1) :: (store 12 into `<3 x float> addrspace(1)* undef`, align 16, addrspace 1)
; CHECK: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
; CHECK: S_SETPC_B64_return [[COPY4]]
  store <3 x float> %arg0, <3 x float> addrspace(1)* undef
  ret void
}

define void @void_func_v4f32(<4 x float> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[DEF]](p1) :: (store 16 into `<4 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
  store <4 x float> %arg0, <4 x float> addrspace(1)* undef
  ret void
}

define void @void_func_v8f32(<8 x float> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s32>), [[DEF]](p1) :: (store 32 into `<8 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
  store <8 x float> %arg0, <8 x float> addrspace(1)* undef
  ret void
}

define void @void_func_v16f32(<16 x float> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s32>), [[DEF]](p1) :: (store 64 into `<16 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
  store <16 x float> %arg0, <16 x float> addrspace(1)* undef
  ret void
}

define void @void_func_v2f64(<2 x double> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<2 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
  store <2 x double> %arg0, <2 x double> addrspace(1)* undef
  ret void
}

define void @void_func_v3f64(<3 x double> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<3 x s64>), [[DEF]](p1) :: (store 24 into `<3 x double> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
; CHECK: S_SETPC_B64_return [[COPY7]]
  store <3 x double> %arg0, <3 x double> addrspace(1)* undef
  ret void
}

define void @void_func_v4f64(<4 x double> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s64>), [[DEF]](p1) :: (store 32 into `<4 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
  store <4 x double> %arg0, <4 x double> addrspace(1)* undef
  ret void
}

define void @void_func_v8f64(<8 x double> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY10]](s32), [[COPY11]](s32)
; CHECK: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
; CHECK: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s64>), [[DEF]](p1) :: (store 64 into `<8 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
  store <8 x double> %arg0, <8 x double> addrspace(1)* undef
  ret void
}

define void @void_func_v16f64(<16 x double> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
; CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY10]](s32), [[COPY11]](s32)
; CHECK: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
; CHECK: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
; CHECK: [[MV9:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
; CHECK: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
; CHECK: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
; CHECK: [[MV12:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY24]](s32), [[COPY25]](s32)
; CHECK: [[MV13:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY26]](s32), [[COPY27]](s32)
; CHECK: [[MV14:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY28]](s32), [[COPY29]](s32)
; CHECK: [[MV15:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64), [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64), [[MV12]](s64), [[MV13]](s64), [[MV14]](s64), [[MV15]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s64>), [[DEF]](p1) :: (store 128 into `<16 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
  store <16 x double> %arg0, <16 x double> addrspace(1)* undef
  ret void
}
1086 ; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
1087 ; CHECK: S_SETPC_B64_return [[COPY33]]
1088 store <16 x double> %arg0, <16 x double> addrspace(1)* undef
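; Vectors of 16-bit elements are passed one element per VGPR; the 32-bit
; pieces are collected with G_BUILD_VECTOR and then truncated to the 16-bit
; element type.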
define void @void_func_v2f16(<2 x half> %arg0) #0 {
; CHECK-LABEL: name: void_func_v2f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<2 x s16>), [[DEF]](p1) :: (store 4 into `<2 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
store <2 x half> %arg0, <2 x half> addrspace(1)* undef
ret void
}

define void @void_func_v3f16(<3 x half> %arg0) #0 {
; CHECK-LABEL: name: void_func_v3f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<3 x s16>) = G_TRUNC [[BUILD_VECTOR]](<3 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<3 x s16>), [[DEF]](p1) :: (store 6 into `<3 x half> addrspace(1)* undef`, align 8, addrspace 1)
; CHECK: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
; CHECK: S_SETPC_B64_return [[COPY4]]
store <3 x half> %arg0, <3 x half> addrspace(1)* undef
ret void
}

define void @void_func_v4f16(<4 x half> %arg0) #0 {
; CHECK-LABEL: name: void_func_v4f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<4 x s16>), [[DEF]](p1) :: (store 8 into `<4 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
store <4 x half> %arg0, <4 x half> addrspace(1)* undef
ret void
}

define void @void_func_v8f16(<8 x half> %arg0) #0 {
; CHECK-LABEL: name: void_func_v8f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<8 x s16>) = G_TRUNC [[BUILD_VECTOR]](<8 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<8 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK: S_SETPC_B64_return [[COPY9]]
store <8 x half> %arg0, <8 x half> addrspace(1)* undef
ret void
}

define void @void_func_v16f16(<16 x half> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<16 x s16>) = G_TRUNC [[BUILD_VECTOR]](<16 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<16 x s16>), [[DEF]](p1) :: (store 32 into `<16 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
store <16 x half> %arg0, <16 x half> addrspace(1)* undef
ret void
}

; Make sure there is no alignment requirement for arguments passed in VGPRs;
; the i64 is split across $vgpr1 and $vgpr2 without a padding register.
define void @void_func_i32_i64_i32(i32 %arg0, i64 %arg1, i32 %arg2) #0 {
; CHECK-LABEL: name: void_func_i32_i64_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[COPY]](s32), [[DEF]](p1) :: (volatile store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[MV]](s64), [[DEF1]](p1) :: (volatile store 8 into `i64 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[COPY3]](s32), [[DEF]](p1) :: (volatile store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
store volatile i32 %arg0, i32 addrspace(1)* undef
store volatile i64 %arg1, i64 addrspace(1)* undef
store volatile i32 %arg2, i32 addrspace(1)* undef
ret void
}
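; Struct arguments are split into one virtual register per member, with
; sub-32-bit members truncated out of a full 32-bit VGPR.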
define void @void_func_struct_i32({ i32 } %arg0) #0 {
; CHECK-LABEL: name: void_func_struct_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[COPY]](s32), [[DEF]](p1) :: (store 4 into `{ i32 } addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
store { i32 } %arg0, { i32 } addrspace(1)* undef
ret void
}

define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
; CHECK-LABEL: name: void_func_struct_i8_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s8), [[DEF]](p1) :: (store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C]](s64)
; CHECK: G_STORE [[COPY1]](s32), [[GEP]](p1) :: (store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
store { i8, i32 } %arg0, { i8, i32 } addrspace(1)* undef
ret void
}
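; byval arguments are passed on the stack: the incoming pointer is read from
; a fixed stack slot, and the aggregate's members are then loaded through it.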
define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval %arg0) #0 {
; CHECK-LABEL: name: void_func_byval_struct_i8_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p5) :: (load 1 from %ir.arg0, align 4, addrspace 5)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[LOAD]], [[C]](s32)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p5) :: (load 4 from %ir.arg0 + 4, addrspace 5)
; CHECK: G_STORE [[LOAD1]](s8), [[DEF]](p1) :: (store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s32), [[GEP1]](p1) :: (store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]]
%arg0.load = load { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
store { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
ret void
}

define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval %arg0, { i8, i32 } addrspace(5)* byval %arg1, i32 %arg2) #0 {
; CHECK-LABEL: name: void_func_byval_struct_i8_i32_x2
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p5) :: (volatile load 1 from %ir.arg0, align 4, addrspace 5)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[LOAD]], [[C]](s32)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p5) :: (volatile load 4 from %ir.arg0 + 4, addrspace 5)
; CHECK: [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[LOAD1]](p5) :: (volatile load 1 from %ir.arg1, align 4, addrspace 5)
; CHECK: [[GEP1:%[0-9]+]]:_(p5) = G_GEP [[LOAD1]], [[C]](s32)
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p5) :: (volatile load 4 from %ir.arg1 + 4, addrspace 5)
; CHECK: G_STORE [[LOAD2]](s8), [[DEF]](p1) :: (volatile store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD3]](s32), [[GEP2]](p1) :: (volatile store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: G_STORE [[LOAD4]](s8), [[DEF]](p1) :: (volatile store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[GEP3:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD5]](s32), [[GEP3]](p1) :: (volatile store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: G_STORE [[COPY]](s32), [[DEF1]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
%arg0.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
%arg1.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg1
store volatile { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
store volatile { i8, i32 } %arg1.load, { i8, i32 } addrspace(1)* undef
store volatile i32 %arg2, i32 addrspace(3)* undef
ret void
}

define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval %arg0, i64 addrspace(5)* byval %arg1) #0 {
; CHECK-LABEL: name: void_func_byval_i32_byval_i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p5) :: (load 4 from %ir.arg0, addrspace 5)
; CHECK: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p5) :: (load 8 from %ir.arg1, addrspace 5)
; CHECK: G_STORE [[LOAD2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD3]](s64), [[DEF1]](p1) :: (store 8 into `i64 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]]
%arg0.load = load i32, i32 addrspace(5)* %arg0
%arg1.load = load i64, i64 addrspace(5)* %arg1
store i32 %arg0.load, i32 addrspace(1)* undef
store i64 %arg1.load, i64 addrspace(1)* undef
ret void
}
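; Once the 32 argument VGPRs are exhausted, the remaining arguments are
; passed on the stack and read back with loads from fixed stack slots.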
define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0 {
; CHECK-LABEL: name: void_func_v32i32_i32_i64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[LOAD2]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD]](s32), [[DEF1]](p1) :: (volatile store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[MV]](s64), [[DEF2]](p1) :: (volatile store 8 into `i64 addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile i32 %arg1, i32 addrspace(1)* undef
store volatile i64 %arg2, i64 addrspace(1)* undef
ret void
}

; FIXME: Different ext load types on CI vs. VI
define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i16 %arg3, half %arg4) #0 {
; CHECK-LABEL: name: void_func_v32i32_i1_i8_i16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 1 from %fixed-stack.3, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 1 from %fixed-stack.2, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 2 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD3:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 2 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF3:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF4:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD]](s1), [[DEF1]](p1) :: (volatile store 1 into `i1 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD1]](s8), [[DEF2]](p1) :: (volatile store 1 into `i8 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD2]](s16), [[DEF3]](p1) :: (volatile store 2 into `i16 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[LOAD3]](s16), [[DEF4]](p1) :: (volatile store 2 into `half addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile i1 %arg1, i1 addrspace(1)* undef
store volatile i8 %arg2, i8 addrspace(1)* undef
store volatile i16 %arg3, i16 addrspace(1)* undef
store volatile half %arg4, half addrspace(1)* undef
ret void
}

define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2 x float> %arg2) #0 {
; CHECK-LABEL: name: void_func_v32i32_v2i32_v2f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR1]](<2 x s32>), [[DEF1]](p1) :: (volatile store 8 into `<2 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR2]](<2 x s32>), [[DEF2]](p1) :: (volatile store 8 into `<2 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <2 x i32> %arg1, <2 x i32> addrspace(1)* undef
store volatile <2 x float> %arg2, <2 x float> addrspace(1)* undef
ret void
}
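; Stack-passed 16-bit vector elements are loaded back as 32-bit values and
; truncated, mirroring how VGPR-passed 16-bit vectors are handled above.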
define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2 x half> %arg2) #0 {
; CHECK-LABEL: name: void_func_v32i32_v2i16_v2f16
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<2 x s32>)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR2]](<2 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[TRUNC]](<2 x s16>), [[DEF1]](p1) :: (volatile store 4 into `<2 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[TRUNC1]](<2 x s16>), [[DEF2]](p1) :: (volatile store 4 into `<2 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <2 x i16> %arg1, <2 x i16> addrspace(1)* undef
store volatile <2 x half> %arg2, <2 x half> addrspace(1)* undef
ret void
}

define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2 x double> %arg2) #0 {
; CHECK-LABEL: name: void_func_v32i32_v2i64_v2f64
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.7, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.6, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.5, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.4, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV3]](s64)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[DEF1]](p1) :: (volatile store 16 into `<2 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR2]](<2 x s64>), [[DEF2]](p1) :: (volatile store 16 into `<2 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <2 x i64> %arg1, <2 x i64> addrspace(1)* undef
store volatile <2 x double> %arg2, <2 x double> addrspace(1)* undef
ret void
}
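; 64-bit vector elements passed on the stack are loaded as 32-bit halves and
; reassembled with G_MERGE_VALUES before the vector is built.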
1667 define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4 x float> %arg2) #0 {
1668 ; CHECK-LABEL: name: void_func_v32i32_v4i32_v4f32
1669 ; CHECK: bb.1 (%ir-block.0):
1670 ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
1671 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
1672 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
1673 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
1674 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
1675 ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
1676 ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
1677 ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
1678 ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
1679 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
1680 ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
1681 ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
1682 ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
1683 ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
1684 ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
1685 ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
1686 ; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
1687 ; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
1688 ; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
1689 ; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
1690 ; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
1691 ; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
1692 ; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
1693 ; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
1694 ; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
1695 ; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
1696 ; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
1697 ; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
1698 ; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
1699 ; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
1700 ; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
1701 ; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
1702 ; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
1703 ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
1704 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.7, align 1, addrspace 5)
1705 ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
1706 ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.6, align 1, addrspace 5)
1707 ; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
1708 ; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.5, align 1, addrspace 5)
1709 ; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
1710 ; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.4, align 1, addrspace 5)
1711 ; CHECK: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
1712 ; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
1713 ; CHECK: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
1714 ; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
1715 ; CHECK: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
1716 ; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
1717 ; CHECK: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
1718 ; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
1719 ; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1720 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
1721 ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
1722 ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
1723 ; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1724 ; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1725 ; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1726 ; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
1727 ; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[DEF1]](p1) :: (volatile store 16 into `<4 x i32> addrspace(1)* undef`, addrspace 1)
1728 ; CHECK: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[DEF2]](p1) :: (volatile store 16 into `<4 x float> addrspace(1)* undef`, addrspace 1)
1729 ; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
1730 ; CHECK: S_SETPC_B64_return [[COPY33]]
1731 store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
1732 store volatile <4 x i32> %arg1, <4 x i32> addrspace(1)* undef
1733 store volatile <4 x float> %arg2, <4 x float> addrspace(1)* undef
1737 define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8 x float> %arg2) #0 {
1738 ; CHECK-LABEL: name: void_func_v32i32_v8i32_v8f32
1739 ; CHECK: bb.1 (%ir-block.0):
1740 ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
1741 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
1742 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
1743 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
1744 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
1745 ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
1746 ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
1747 ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
1748 ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
1749 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
1750 ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
1751 ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
1752 ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
1753 ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
1754 ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
1755 ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
1756 ; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
1757 ; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
1758 ; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
1759 ; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
1760 ; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
1761 ; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
1762 ; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
1763 ; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
1764 ; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
1765 ; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
1766 ; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
1767 ; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
1768 ; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
1769 ; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
1770 ; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
1771 ; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
1772 ; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
1773 ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.15
1774 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.15, align 1, addrspace 5)
1775 ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.14
1776 ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.14, align 1, addrspace 5)
1777 ; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.13
1778 ; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.13, align 1, addrspace 5)
1779 ; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.12
1780 ; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.12, align 1, addrspace 5)
1781 ; CHECK: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.11
1782 ; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load 4 from %fixed-stack.11, align 1, addrspace 5)
1783 ; CHECK: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.10
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load 4 from %fixed-stack.10, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load 4 from %fixed-stack.9, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load 4 from %fixed-stack.8, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
; CHECK: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load 4 from %fixed-stack.7, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
; CHECK: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p5) :: (invariant load 4 from %fixed-stack.6, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
; CHECK: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p5) :: (invariant load 4 from %fixed-stack.5, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX11:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
; CHECK: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p5) :: (invariant load 4 from %fixed-stack.4, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX12:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX13:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX14:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX15:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s32>), [[DEF1]](p1) :: (volatile store 32 into `<8 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR2]](<8 x s32>), [[DEF2]](p1) :: (volatile store 32 into `<8 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <8 x i32> %arg1, <8 x i32> addrspace(1)* undef
store volatile <8 x float> %arg2, <8 x float> addrspace(1)* undef
ret void
}

define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> %arg1, <16 x float> %arg2) #0 {
; CHECK-LABEL: name: void_func_v32i32_v16i32_v16f32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.31
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.31, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.30
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.30, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.29
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.29, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.28
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.28, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.27
; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load 4 from %fixed-stack.27, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.26
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load 4 from %fixed-stack.26, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.25
; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load 4 from %fixed-stack.25, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.24
; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load 4 from %fixed-stack.24, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.23
; CHECK: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load 4 from %fixed-stack.23, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.22
; CHECK: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p5) :: (invariant load 4 from %fixed-stack.22, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.21
; CHECK: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p5) :: (invariant load 4 from %fixed-stack.21, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX11:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.20
; CHECK: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p5) :: (invariant load 4 from %fixed-stack.20, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX12:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.19
; CHECK: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p5) :: (invariant load 4 from %fixed-stack.19, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX13:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.18
; CHECK: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p5) :: (invariant load 4 from %fixed-stack.18, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX14:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.17
; CHECK: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p5) :: (invariant load 4 from %fixed-stack.17, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX15:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.16
; CHECK: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p5) :: (invariant load 4 from %fixed-stack.16, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX16:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.15
; CHECK: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX16]](p5) :: (invariant load 4 from %fixed-stack.15, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX17:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.14
; CHECK: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX17]](p5) :: (invariant load 4 from %fixed-stack.14, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX18:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.13
; CHECK: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX18]](p5) :: (invariant load 4 from %fixed-stack.13, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX19:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.12
; CHECK: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX19]](p5) :: (invariant load 4 from %fixed-stack.12, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX20:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.11
; CHECK: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX20]](p5) :: (invariant load 4 from %fixed-stack.11, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX21:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.10
; CHECK: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX21]](p5) :: (invariant load 4 from %fixed-stack.10, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX22:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
; CHECK: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX22]](p5) :: (invariant load 4 from %fixed-stack.9, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX23:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
; CHECK: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX23]](p5) :: (invariant load 4 from %fixed-stack.8, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX24:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
; CHECK: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX24]](p5) :: (invariant load 4 from %fixed-stack.7, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX25:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
; CHECK: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX25]](p5) :: (invariant load 4 from %fixed-stack.6, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX26:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
; CHECK: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX26]](p5) :: (invariant load 4 from %fixed-stack.5, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX27:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
; CHECK: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX27]](p5) :: (invariant load 4 from %fixed-stack.4, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX28:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX28]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX29:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX29]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX30:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX30]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX31:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX31]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD16]](s32), [[LOAD17]](s32), [[LOAD18]](s32), [[LOAD19]](s32), [[LOAD20]](s32), [[LOAD21]](s32), [[LOAD22]](s32), [[LOAD23]](s32), [[LOAD24]](s32), [[LOAD25]](s32), [[LOAD26]](s32), [[LOAD27]](s32), [[LOAD28]](s32), [[LOAD29]](s32), [[LOAD30]](s32), [[LOAD31]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s32>), [[DEF1]](p1) :: (volatile store 64 into `<16 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[BUILD_VECTOR2]](<16 x s32>), [[DEF2]](p1) :: (volatile store 64 into `<16 x float> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <16 x i32> %arg1, <16 x i32> addrspace(1)* undef
store volatile <16 x float> %arg2, <16 x float> addrspace(1)* undef
ret void
}

; Make sure v3 isn't a wasted register because of v3 types being promoted to v4
define void @void_func_v3f32_wasted_reg(<3 x float> %arg0, i32 %arg1) #0 {
; CHECK-LABEL: name: void_func_v3f32_wasted_reg
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C]](s32)
; CHECK: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C1]](s32)
; CHECK: [[EVEC2:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C2]](s32)
; CHECK: G_STORE [[EVEC]](s32), [[DEF]](p3) :: (volatile store 4 into `float addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[EVEC1]](s32), [[DEF]](p3) :: (volatile store 4 into `float addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[EVEC2]](s32), [[DEF]](p3) :: (volatile store 4 into `float addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[COPY3]](s32), [[DEF1]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
%arg0.0 = extractelement <3 x float> %arg0, i32 0
%arg0.1 = extractelement <3 x float> %arg0, i32 1
%arg0.2 = extractelement <3 x float> %arg0, i32 2
store volatile float %arg0.0, float addrspace(3)* undef
store volatile float %arg0.1, float addrspace(3)* undef
store volatile float %arg0.2, float addrspace(3)* undef
store volatile i32 %arg1, i32 addrspace(3)* undef
ret void
}

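; Illustrative sketch, added here and not part of the autogenerated checks:
; by the same argument-assignment reasoning, a <4 x float> has no padding
; element, so the trailing i32 would be expected in $vgpr4. The function
; name and its expected register assignment are assumptions for
; illustration only.
define void @void_func_v4f32_i32_sketch(<4 x float> %arg0, i32 %arg1) #0 {
store volatile <4 x float> %arg0, <4 x float> addrspace(1)* undef
store volatile i32 %arg1, i32 addrspace(1)* undef
ret void
}
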
define void @void_func_v3i32_wasted_reg(<3 x i32> %arg0, i32 %arg1) #0 {
; CHECK-LABEL: name: void_func_v3i32_wasted_reg
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C]](s32)
; CHECK: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C1]](s32)
; CHECK: [[EVEC2:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C2]](s32)
; CHECK: G_STORE [[EVEC]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[EVEC1]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[EVEC2]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[COPY3]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
; CHECK: S_SETPC_B64_return [[COPY5]]
%arg0.0 = extractelement <3 x i32> %arg0, i32 0
%arg0.1 = extractelement <3 x i32> %arg0, i32 1
%arg0.2 = extractelement <3 x i32> %arg0, i32 2
store volatile i32 %arg0.0, i32 addrspace(3)* undef
store volatile i32 %arg0.1, i32 addrspace(3)* undef
store volatile i32 %arg0.2, i32 addrspace(3)* undef
store volatile i32 %arg1, i32 addrspace(3)* undef
ret void
}

; Check there is no crash.
define void @void_func_v16i8(<16 x i8> %arg0) #0 {
; CHECK-LABEL: name: void_func_v16i8
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<16 x s8>) = G_TRUNC [[BUILD_VECTOR]](<16 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](<16 x s8>), [[DEF]](p1) :: (volatile store 16 into `<16 x i8> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
; CHECK: S_SETPC_B64_return [[COPY17]]
store volatile <16 x i8> %arg0, <16 x i8> addrspace(1)* undef
ret void
}

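; Illustrative sketch, added here and not part of the autogenerated checks:
; narrower i8 vectors are assumed to follow the same convention, one VGPR
; per element with the byte in the low bits, so <4 x i8> should only
; occupy $vgpr0-$vgpr3. The function name is hypothetical.
define void @void_func_v4i8_sketch(<4 x i8> %arg0) #0 {
store volatile <4 x i8> %arg0, <4 x i8> addrspace(1)* undef
ret void
}
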
; Check there is no crash.
define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 {
; CHECK-LABEL: name: void_func_v32i32_v16i8
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr31
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.15
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load 4 from %fixed-stack.15, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.14
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load 4 from %fixed-stack.14, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.13
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load 4 from %fixed-stack.13, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.12
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load 4 from %fixed-stack.12, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.11
; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load 4 from %fixed-stack.11, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.10
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load 4 from %fixed-stack.10, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load 4 from %fixed-stack.9, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load 4 from %fixed-stack.8, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
; CHECK: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load 4 from %fixed-stack.7, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
; CHECK: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p5) :: (invariant load 4 from %fixed-stack.6, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
; CHECK: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p5) :: (invariant load 4 from %fixed-stack.5, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX11:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
; CHECK: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p5) :: (invariant load 4 from %fixed-stack.4, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX12:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
; CHECK: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p5) :: (invariant load 4 from %fixed-stack.3, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX13:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p5) :: (invariant load 4 from %fixed-stack.2, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX14:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p5) :: (invariant load 4 from %fixed-stack.1, align 1, addrspace 5)
; CHECK: [[FRAME_INDEX15:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p5) :: (invariant load 4 from %fixed-stack.0, align 1, addrspace 5)
; CHECK: [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<16 x s8>) = G_TRUNC [[BUILD_VECTOR1]](<16 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store 128 into `<32 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE [[TRUNC]](<16 x s8>), [[DEF1]](p1) :: (volatile store 16 into `<16 x i8> addrspace(1)* undef`, addrspace 1)
; CHECK: [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
; CHECK: S_SETPC_B64_return [[COPY33]]
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <16 x i8> %arg1, <16 x i8> addrspace(1)* undef
ret void
}

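; Illustrative sketch, added here and not part of the autogenerated checks:
; once the 32 VGPRs are exhausted, a narrower trailing vector is assumed to
; need proportionally fewer fixed-stack slots, here 8 loads instead of 16.
; The function name is hypothetical.
define void @void_func_v32i32_v8i8_sketch(<32 x i32> %arg0, <8 x i8> %arg1) #0 {
store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
store volatile <8 x i8> %arg1, <8 x i8> addrspace(1)* undef
ret void
}
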
attributes #0 = { nounwind }