; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -global-isel-abort=0 -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s

define i1 @i1_func_void() #0 {
  ; CHECK-LABEL: name: i1_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define zeroext i1 @i1_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i1_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define signext i1 @i1_signext_func_void() #0 {
  ; CHECK-LABEL: name: i1_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i1, i1 addrspace(1)* undef
  ret i1 %val
}

define i8 @i8_func_void() #0 {
  ; CHECK-LABEL: name: i8_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define zeroext i8 @i8_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i8_zeroext_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define signext i8 @i8_signext_func_void() #0 {
  ; CHECK-LABEL: name: i8_signext_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
  %val = load i8, i8 addrspace(1)* undef
  ret i8 %val
}

define i16 @i16_func_void() #0 {
  ; CHECK-LABEL: name: i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define zeroext i16 @i16_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i16_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define signext i16 @i16_signext_func_void() #0 {
  ; CHECK-LABEL: name: i16_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i16, i16 addrspace(1)* undef
  ret i16 %val
}

define i32 @i32_func_void() #0 {
  ; CHECK-LABEL: name: i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load 4 from `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load i32, i32 addrspace(1)* undef
  ret i32 %val
}

define i48 @i48_func_void() #0 {
  ; CHECK-LABEL: name: i48_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load 6 from `i48 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
  ; CHECK:   [[INSERT:%[0-9]+]]:_(s64) = G_INSERT [[DEF1]], [[LOAD]](s48), 0
  ; CHECK:   [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s64), 0
  ; CHECK:   [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s64), 32
  ; CHECK:   $vgpr0 = COPY [[EXTRACT]](s32)
  ; CHECK:   $vgpr1 = COPY [[EXTRACT1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i48, i48 addrspace(1)* undef, align 8
  ret i48 %val
}

define i64 @i64_func_void() #0 {
  ; CHECK-LABEL: name: i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load 8 from `i64 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load i64, i64 addrspace(1)* undef
  ret i64 %val
}

define i65 @i65_func_void() #0 {
  ; CHECK-LABEL: name: i65_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load 9 from `i65 addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[DEF1:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
  ; CHECK:   [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF1]], [[LOAD]](s65), 0
  ; CHECK:   [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 0
  ; CHECK:   [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 32
  ; CHECK:   [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 64
  ; CHECK:   $vgpr0 = COPY [[EXTRACT]](s32)
  ; CHECK:   $vgpr1 = COPY [[EXTRACT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[EXTRACT2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, i65 addrspace(1)* undef
  ret i65 %val
}

define float @f32_func_void() #0 {
  ; CHECK-LABEL: name: f32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load 4 from `float addrspace(1)* undef`, addrspace 1)
  ; CHECK:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
  %val = load float, float addrspace(1)* undef
  ret float %val
}

define double @f64_func_void() #0 {
  ; CHECK-LABEL: name: f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load 8 from `double addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load double, double addrspace(1)* undef
  ret double %val
}

define <2 x double> @v2f64_func_void() #0 {
  ; CHECK-LABEL: name: v2f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load 16 from `<2 x double> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x double>, <2 x double> addrspace(1)* undef
  ret <2 x double> %val
}

define <2 x i32> @v2i32_func_void() #0 {
  ; CHECK-LABEL: name: v2i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load 8 from `<2 x i32> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
  ret <2 x i32> %val
}

define <3 x i32> @v3i32_func_void() #0 {
  ; CHECK-LABEL: name: v3i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load 12 from `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i32>, <3 x i32> addrspace(1)* undef
  ret <3 x i32> %val
}

define <4 x i32> @v4i32_func_void() #0 {
  ; CHECK-LABEL: name: v4i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load 16 from `<4 x i32> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
  ret <4 x i32> %val
}

define <5 x i32> @v5i32_func_void() #0 {
  ; CHECK-LABEL: name: v5i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load 20 from `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
  %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
  ret <5 x i32> %val
}

define <8 x i32> @v8i32_func_void() #0 {
  ; CHECK-LABEL: name: v8i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
  ret <8 x i32> %val
}

define <16 x i32> @v16i32_func_void() #0 {
  ; CHECK-LABEL: name: v16i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load 64 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
  ret <16 x i32> %val
}

define <32 x i32> @v32i32_func_void() #0 {
  ; CHECK-LABEL: name: v32i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
  ret <32 x i32> %val
}

define <2 x i64> @v2i64_func_void() #0 {
  ; CHECK-LABEL: name: v2i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load 16 from `<2 x i64> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x i64>, <2 x i64> addrspace(1)* undef
  ret <2 x i64> %val
}

define <3 x i64> @v3i64_func_void() #0 {
  ; CHECK-LABEL: name: v3i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<3 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load 24 from %ir.ptr, align 32, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
  %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
  ret <3 x i64> %val
}

define <4 x i64> @v4i64_func_void() #0 {
  ; CHECK-LABEL: name: v4i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<4 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
  ret <4 x i64> %val
}

define <5 x i64> @v5i64_func_void() #0 {
  ; CHECK-LABEL: name: v5i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<5 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load 40 from %ir.ptr, align 64, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<5 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
  %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
  ret <5 x i64> %val
}

define <8 x i64> @v8i64_func_void() #0 {
  ; CHECK-LABEL: name: v8i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load 64 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
  ret <8 x i64> %val
}

define <16 x i64> @v16i64_func_void() #0 {
  ; CHECK-LABEL: name: v16i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s64>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
  ret <16 x i64> %val
}

define <2 x i16> @v2i16_func_void() #0 {
  ; CHECK-LABEL: name: v2i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load 4 from `<2 x i16> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<2 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
  ret <2 x i16> %val
}

define <3 x i16> @v3i16_func_void() #0 {
  ; CHECK-LABEL: name: v3i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load 6 from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<3 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
  ret <3 x i16> %val
}

define <4 x i16> @v4i16_func_void() #0 {
  ; CHECK-LABEL: name: v4i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load 8 from `<4 x i16> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
  ret <4 x i16> %val
}

define <4 x half> @v4f16_func_void() #0 {
  ; CHECK-LABEL: name: v4f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load 8 from `<4 x half> addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <4 x half>, <4 x half> addrspace(1)* undef
  ret <4 x half> %val
}

define <5 x i16> @v5i16_func_void() #0 {
  ; CHECK-LABEL: name: v5i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<5 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load 10 from %ir.ptr, align 16, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<5 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   $vgpr4 = COPY [[ANYEXT4]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
  %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
  ret <5 x i16> %val
}

define <8 x i16> @v8i16_func_void() #0 {
  ; CHECK-LABEL: name: v8i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load 16 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<8 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   $vgpr4 = COPY [[ANYEXT4]](s32)
  ; CHECK:   $vgpr5 = COPY [[ANYEXT5]](s32)
  ; CHECK:   $vgpr6 = COPY [[ANYEXT6]](s32)
  ; CHECK:   $vgpr7 = COPY [[ANYEXT7]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
  ret <8 x i16> %val
}

define <16 x i16> @v16i16_func_void() #0 {
  ; CHECK-LABEL: name: v16i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<16 x s16>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
  ; CHECK:   [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
  ; CHECK:   [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s16)
  ; CHECK:   [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s16)
  ; CHECK:   [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s16)
  ; CHECK:   [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s16)
  ; CHECK:   [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s16)
  ; CHECK:   [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s16)
  ; CHECK:   [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s16)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   $vgpr4 = COPY [[ANYEXT4]](s32)
  ; CHECK:   $vgpr5 = COPY [[ANYEXT5]](s32)
  ; CHECK:   $vgpr6 = COPY [[ANYEXT6]](s32)
  ; CHECK:   $vgpr7 = COPY [[ANYEXT7]](s32)
  ; CHECK:   $vgpr8 = COPY [[ANYEXT8]](s32)
  ; CHECK:   $vgpr9 = COPY [[ANYEXT9]](s32)
  ; CHECK:   $vgpr10 = COPY [[ANYEXT10]](s32)
  ; CHECK:   $vgpr11 = COPY [[ANYEXT11]](s32)
  ; CHECK:   $vgpr12 = COPY [[ANYEXT12]](s32)
  ; CHECK:   $vgpr13 = COPY [[ANYEXT13]](s32)
  ; CHECK:   $vgpr14 = COPY [[ANYEXT14]](s32)
  ; CHECK:   $vgpr15 = COPY [[ANYEXT15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
  ret <16 x i16> %val
}

define <16 x i8> @v16i8_func_void() #0 {
  ; CHECK-LABEL: name: v16i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load 16 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
  ; CHECK:   [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
  ; CHECK:   [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
  ; CHECK:   [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
  ; CHECK:   [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
  ; CHECK:   [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
  ; CHECK:   [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
  ; CHECK:   [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
  ; CHECK:   [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   $vgpr4 = COPY [[ANYEXT4]](s32)
  ; CHECK:   $vgpr5 = COPY [[ANYEXT5]](s32)
  ; CHECK:   $vgpr6 = COPY [[ANYEXT6]](s32)
  ; CHECK:   $vgpr7 = COPY [[ANYEXT7]](s32)
  ; CHECK:   $vgpr8 = COPY [[ANYEXT8]](s32)
  ; CHECK:   $vgpr9 = COPY [[ANYEXT9]](s32)
  ; CHECK:   $vgpr10 = COPY [[ANYEXT10]](s32)
  ; CHECK:   $vgpr11 = COPY [[ANYEXT11]](s32)
  ; CHECK:   $vgpr12 = COPY [[ANYEXT12]](s32)
  ; CHECK:   $vgpr13 = COPY [[ANYEXT13]](s32)
  ; CHECK:   $vgpr14 = COPY [[ANYEXT14]](s32)
  ; CHECK:   $vgpr15 = COPY [[ANYEXT15]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
  %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
  ret <16 x i8> %val
}

define <4 x i8> @v4i8_func_void() #0 {
  ; CHECK-LABEL: name: v4i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK:   $vgpr3 = COPY [[ANYEXT3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %ptr = load volatile <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(4)* undef
  %val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
  ret <4 x i8> %val
}

define {i8, i32} @struct_i8_i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_i8_i32_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
  ; CHECK:   [[GEP:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C]](s64)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
  %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
  ret { i8, i32 } %val
}

define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret %arg0) #0 {
  ; CHECK-LABEL: name: void_func_sret_struct_i8_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i8 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p1) :: (volatile load 4 from `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[GEP:%[0-9]+]]:_(p5) = G_GEP [[COPY]], [[C]](s32)
  ; CHECK:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store 1 into %ir.gep01, addrspace 5)
  ; CHECK:   G_STORE [[LOAD1]](s32), [[GEP]](p5) :: (store 4 into %ir.gep1, addrspace 5)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %val0 = load volatile i8, i8 addrspace(1)* undef
  %val1 = load volatile i32, i32 addrspace(1)* undef
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
  store i8 %val0, i8 addrspace(5)* %gep0
  store i32 %val1, i32 addrspace(5)* %gep1
  ret void
}

; FIXME: Should be able to fold offsets in all of these pre-gfx9. Call
; lowering introduces an extra CopyToReg/CopyFromReg obscuring the inserted
; AssertZext. Not using it introduces the spills.

define <33 x i32> @v33i32_func_void() #0 {
  ; CHECK-LABEL: name: v33i32_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load 132 from %ir.ptr, align 256, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<33 x s32>)
  %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
  ret <33 x i32> %val
}

define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_v32i32_i32_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK:   [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from %ir.ptr + 128, align 128, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
  %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
  %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
  ret { <32 x i32>, i32 } %val
}

define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_i32_v32i32_func_void
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, align 128, addrspace 1)
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK:   [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[GEP]](p1) :: (load 128 from %ir.ptr + 128, addrspace 1)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<32 x s32>)
  %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
  %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
  ret { i32, <32 x i32> } %val
}

; Make sure the last struct component is returned in v3, not v4.
define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
  ; CHECK:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %load2 = load volatile i32, i32 addrspace(3)* undef
  %load3 = load volatile i32, i32 addrspace(3)* undef

  %insert.0 = insertelement <3 x i32> undef, i32 %load0, i32 0
  %insert.1 = insertelement <3 x i32> %insert.0, i32 %load1, i32 1
  %insert.2 = insertelement <3 x i32> %insert.1, i32 %load2, i32 2
  %insert.3 = insertvalue { <3 x i32>, i32 } undef, <3 x i32> %insert.2, 0
  %insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x i32>, i32 } %insert.4
}

define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[DEF2:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[DEF3:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF2]], [[LOAD]](s32), [[C]](s32)
  ; CHECK:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile float, float addrspace(3)* undef
  %load1 = load volatile float, float addrspace(3)* undef
  %load2 = load volatile float, float addrspace(3)* undef
  %load3 = load volatile i32, i32 addrspace(3)* undef

  %insert.0 = insertelement <3 x float> undef, float %load0, i32 0
  %insert.1 = insertelement <3 x float> %insert.0, float %load1, i32 1
  %insert.2 = insertelement <3 x float> %insert.1, float %load2, i32 2
  %insert.3 = insertvalue { <3 x float>, i32 } undef, <3 x float> %insert.2, 0
  %insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x float>, i32 } %insert.4
}

define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret %arg0) #0 {
  ; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
  ; CHECK:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
  ; CHECK:   [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
  ; CHECK:   [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C1]](s32)
  ; CHECK:   [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C2]](s32)
  ; CHECK:   G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
  ; CHECK:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK:   S_SETPC_B64_return [[COPY2]]
  %arg0.int = ptrtoint i8 addrspace(5)* %arg0 to i32

  %lshr0 = lshr i32 %arg0.int, 16
  %lshr1 = lshr i32 %arg0.int, 17
  %lshr2 = lshr i32 %arg0.int, 18

  store volatile i32 %lshr0, i32 addrspace(3)* undef
  store volatile i32 %lshr1, i32 addrspace(3)* undef
  store volatile i32 %lshr2, i32 addrspace(3)* undef
  ret void
}

attributes #0 = { nounwind }