1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 ; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
4 ; FIXME: Also test with a pre-gfx8 target.
; i1 return: the s1 load is any-extended (G_ANYEXT) to s32 and returned in $vgpr0.
6 define i1 @i1_func_void() #0 {
7 ; CHECK-LABEL: name: i1_func_void
8 ; CHECK: bb.1 (%ir-block.0):
9 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
11 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
12 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
13 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
14 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
15 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
16 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
17 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
18 %val = load i1, i1 addrspace(1)* undef
; zeroext i1 return: the s1 load is zero-extended (G_ZEXT) to s32 for $vgpr0.
22 define zeroext i1 @i1_zeroext_func_void() #0 {
23 ; CHECK-LABEL: name: i1_zeroext_func_void
24 ; CHECK: bb.1 (%ir-block.0):
25 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
27 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
28 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
29 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
30 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
31 ; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
32 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
33 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
34 %val = load i1, i1 addrspace(1)* undef
; signext i1 return: the s1 load is sign-extended (G_SEXT) to s32 for $vgpr0.
38 define signext i1 @i1_signext_func_void() #0 {
39 ; CHECK-LABEL: name: i1_signext_func_void
40 ; CHECK: bb.1 (%ir-block.0):
41 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
43 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
44 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
45 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
46 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
47 ; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
48 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
49 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
50 %val = load i1, i1 addrspace(1)* undef
; Non-power-of-2 i7 return: the s7 load is any-extended to s32 and returned in $vgpr0.
54 define i7 @i7_func_void() #0 {
55 ; CHECK-LABEL: name: i7_func_void
56 ; CHECK: bb.1 (%ir-block.0):
57 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
59 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
60 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
61 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
62 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s7)
63 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
64 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
65 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
66 %val = load i7, i7 addrspace(1)* undef
; zeroext i7 return: the s7 load is zero-extended (G_ZEXT) to s32 for $vgpr0.
70 define zeroext i7 @i7_zeroext_func_void() #0 {
71 ; CHECK-LABEL: name: i7_zeroext_func_void
72 ; CHECK: bb.1 (%ir-block.0):
73 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
75 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
76 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
77 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
78 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s7)
79 ; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
80 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
81 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
82 %val = load i7, i7 addrspace(1)* undef
; signext i7 return: the s7 load is sign-extended (G_SEXT) to s32 for $vgpr0.
86 define signext i7 @i7_signext_func_void() #0 {
87 ; CHECK-LABEL: name: i7_signext_func_void
88 ; CHECK: bb.1 (%ir-block.0):
89 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
91 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
92 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
93 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
94 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s7)
95 ; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
96 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
97 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
98 %val = load i7, i7 addrspace(1)* undef
; i8 return: the s8 load is any-extended to s32 and returned in $vgpr0.
102 define i8 @i8_func_void() #0 {
103 ; CHECK-LABEL: name: i8_func_void
104 ; CHECK: bb.1 (%ir-block.0):
105 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
107 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
108 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
109 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
110 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
111 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
112 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
113 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
114 %val = load i8, i8 addrspace(1)* undef
; zeroext i8 return: the s8 load is zero-extended (G_ZEXT) to s32 for $vgpr0.
118 define zeroext i8 @i8_zeroext_func_void() #0 {
119 ; CHECK-LABEL: name: i8_zeroext_func_void
120 ; CHECK: bb.1 (%ir-block.0):
121 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
123 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
124 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
125 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
126 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
127 ; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
128 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
129 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
130 %val = load i8, i8 addrspace(1)* undef
; signext i8 return: the s8 load is sign-extended (G_SEXT) to s32 for $vgpr0.
134 define signext i8 @i8_signext_func_void() #0 {
135 ; CHECK-LABEL: name: i8_signext_func_void
136 ; CHECK: bb.1 (%ir-block.0):
137 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
139 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
140 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
141 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
142 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
143 ; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
144 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
145 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
146 %val = load i8, i8 addrspace(1)* undef
; i16 return: the s16 load is any-extended to s32 and returned in $vgpr0.
150 define i16 @i16_func_void() #0 {
151 ; CHECK-LABEL: name: i16_func_void
152 ; CHECK: bb.1 (%ir-block.0):
153 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
155 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
156 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
157 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
158 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
159 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
160 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
161 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
162 %val = load i16, i16 addrspace(1)* undef
; zeroext i16 return: the s16 load is zero-extended (G_ZEXT) to s32 for $vgpr0.
166 define zeroext i16 @i16_zeroext_func_void() #0 {
167 ; CHECK-LABEL: name: i16_zeroext_func_void
168 ; CHECK: bb.1 (%ir-block.0):
169 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
171 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
172 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
173 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
174 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
175 ; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
176 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
177 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
178 %val = load i16, i16 addrspace(1)* undef
; signext i16 return: the s16 load is sign-extended (G_SEXT) to s32 for $vgpr0.
182 define signext i16 @i16_signext_func_void() #0 {
183 ; CHECK-LABEL: name: i16_signext_func_void
184 ; CHECK: bb.1 (%ir-block.0):
185 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
187 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
188 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
189 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
190 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
191 ; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
192 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
193 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
194 %val = load i16, i16 addrspace(1)* undef
; half return: translated as an s16 load, any-extended to s32 for $vgpr0.
198 define half @f16_func_void() #0 {
199 ; CHECK-LABEL: name: f16_func_void
200 ; CHECK: bb.1 (%ir-block.0):
201 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
203 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
204 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
205 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `half addrspace(1)* undef`, addrspace 1)
206 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
207 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
208 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
209 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
210 %val = load half, half addrspace(1)* undef
; i24 return: s24 load (ABI align 4), any-extended to s32 for $vgpr0.
214 define i24 @i24_func_void() #0 {
215 ; CHECK-LABEL: name: i24_func_void
216 ; CHECK: bb.1 (%ir-block.0):
217 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
219 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
220 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
221 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
222 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s24)
223 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
224 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
225 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
226 %val = load i24, i24 addrspace(1)* undef
; zeroext i24 return: s24 load zero-extended (G_ZEXT) to s32 for $vgpr0.
230 define zeroext i24 @i24_zeroext_func_void() #0 {
231 ; CHECK-LABEL: name: i24_zeroext_func_void
232 ; CHECK: bb.1 (%ir-block.0):
233 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
235 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
236 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
237 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
238 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
239 ; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
240 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
241 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
242 %val = load i24, i24 addrspace(1)* undef
; signext i24 return: s24 load sign-extended (G_SEXT) to s32 for $vgpr0.
246 define signext i24 @i24_signext_func_void() #0 {
247 ; CHECK-LABEL: name: i24_signext_func_void
248 ; CHECK: bb.1 (%ir-block.0):
249 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
251 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
252 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
253 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
254 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s24)
255 ; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
256 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
257 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
258 %val = load i24, i24 addrspace(1)* undef
; <2 x i24> return: vector is unmerged into two s24 elements, each any-extended to s32 for $vgpr0-$vgpr1.
262 define <2 x i24> @v2i24_func_void() #0 {
263 ; CHECK-LABEL: name: v2i24_func_void
264 ; CHECK: bb.1 (%ir-block.0):
265 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
267 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
268 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
269 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `<2 x i24> addrspace(1)* undef`, align 8, addrspace 1)
270 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<2 x s24>)
271 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
272 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
273 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
274 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
275 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
276 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
277 %val = load <2 x i24>, <2 x i24> addrspace(1)* undef
; <3 x i24> return: vector is unmerged into three s24 elements, each any-extended to s32 for $vgpr0-$vgpr2.
281 define <3 x i24> @v3i24_func_void() #0 {
282 ; CHECK-LABEL: name: v3i24_func_void
283 ; CHECK: bb.1 (%ir-block.0):
284 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
286 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
287 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
288 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `<3 x i24> addrspace(1)* undef`, align 16, addrspace 1)
289 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24), [[UV2:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<3 x s24>)
290 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
291 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
292 ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s24)
293 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
294 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
295 ; CHECK-NEXT: $vgpr2 = COPY [[ANYEXT2]](s32)
296 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
297 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
298 %val = load <3 x i24>, <3 x i24> addrspace(1)* undef
; i32 return: fits a register exactly, copied straight to $vgpr0 with no extension.
302 define i32 @i32_func_void() #0 {
303 ; CHECK-LABEL: name: i32_func_void
304 ; CHECK: bb.1 (%ir-block.0):
305 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
307 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
308 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
309 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
310 ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
311 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
312 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
313 %val = load i32, i32 addrspace(1)* undef
; i48 return: any-extended to s64, then unmerged into two s32 pieces for $vgpr0-$vgpr1.
317 define i48 @i48_func_void() #0 {
318 ; CHECK-LABEL: name: i48_func_void
319 ; CHECK: bb.1 (%ir-block.0):
320 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
322 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
323 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
324 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
325 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
326 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
327 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
328 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
329 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
330 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
331 %val = load i48, i48 addrspace(1)* undef, align 8
; signext i48 return: sign-extended (G_SEXT) to s64, then split into two s32 pieces.
335 define signext i48 @i48_signext_func_void() #0 {
336 ; CHECK-LABEL: name: i48_signext_func_void
337 ; CHECK: bb.1 (%ir-block.0):
338 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
340 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
341 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
342 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
343 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s48)
344 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
345 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
346 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
347 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
348 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
349 %val = load i48, i48 addrspace(1)* undef, align 8
; zeroext i48 return: zero-extended (G_ZEXT) to s64, then split into two s32 pieces.
353 define zeroext i48 @i48_zeroext_func_void() #0 {
354 ; CHECK-LABEL: name: i48_zeroext_func_void
355 ; CHECK: bb.1 (%ir-block.0):
356 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
358 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
359 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
360 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
361 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s48)
362 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
363 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
364 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
365 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
366 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
367 %val = load i48, i48 addrspace(1)* undef, align 8
; i64 return: s64 value unmerged into two s32 pieces for $vgpr0-$vgpr1.
371 define i64 @i64_func_void() #0 {
372 ; CHECK-LABEL: name: i64_func_void
373 ; CHECK: bb.1 (%ir-block.0):
374 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
376 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
377 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
378 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `i64 addrspace(1)* undef`, addrspace 1)
379 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
380 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
381 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
382 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
383 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
384 %val = load i64, i64 addrspace(1)* undef
; i65 return: any-extended to s96, then unmerged into three s32 pieces for $vgpr0-$vgpr2.
388 define i65 @i65_func_void() #0 {
389 ; CHECK-LABEL: name: i65_func_void
390 ; CHECK: bb.1 (%ir-block.0):
391 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
393 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
394 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
395 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
396 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s96) = G_ANYEXT [[LOAD]](s65)
397 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s96)
398 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
399 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
400 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
401 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
402 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
403 %val = load i65, i65 addrspace(1)* undef
; signext i65 return: sign-extended (G_SEXT) to s96, then split into three s32 pieces.
407 define signext i65 @i65_signext_func_void() #0 {
408 ; CHECK-LABEL: name: i65_signext_func_void
409 ; CHECK: bb.1 (%ir-block.0):
410 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
412 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
413 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
414 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
415 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s96) = G_SEXT [[LOAD]](s65)
416 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s96)
417 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
418 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
419 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
420 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
421 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
422 %val = load i65, i65 addrspace(1)* undef
; zeroext i65 return: zero-extended (G_ZEXT) to s96, then split into three s32 pieces.
426 define zeroext i65 @i65_zeroext_func_void() #0 {
427 ; CHECK-LABEL: name: i65_zeroext_func_void
428 ; CHECK: bb.1 (%ir-block.0):
429 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
431 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
432 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
433 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
434 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s96) = G_ZEXT [[LOAD]](s65)
435 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s96)
436 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
437 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
438 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
439 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
440 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
441 %val = load i65, i65 addrspace(1)* undef
; float return: s32 value copied straight to $vgpr0 with no extension.
445 define float @f32_func_void() #0 {
446 ; CHECK-LABEL: name: f32_func_void
447 ; CHECK: bb.1 (%ir-block.0):
448 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
450 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
451 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
452 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `float addrspace(1)* undef`, addrspace 1)
453 ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
454 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
455 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
456 %val = load float, float addrspace(1)* undef
; double return: s64 value unmerged into two s32 pieces for $vgpr0-$vgpr1.
460 define double @f64_func_void() #0 {
461 ; CHECK-LABEL: name: f64_func_void
462 ; CHECK: bb.1 (%ir-block.0):
463 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
465 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
466 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
467 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `double addrspace(1)* undef`, addrspace 1)
468 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
469 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
470 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
471 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
472 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
473 %val = load double, double addrspace(1)* undef
; <2 x double> return: <2 x s64> unmerged into four s32 pieces for $vgpr0-$vgpr3.
477 define <2 x double> @v2f64_func_void() #0 {
478 ; CHECK-LABEL: name: v2f64_func_void
479 ; CHECK: bb.1 (%ir-block.0):
480 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
482 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
483 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
484 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x double> addrspace(1)* undef`, addrspace 1)
485 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
486 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
487 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
488 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
489 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
490 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
491 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
492 %val = load <2 x double>, <2 x double> addrspace(1)* undef
493 ret <2 x double> %val
; <2 x i32> return: unmerged into two s32 elements for $vgpr0-$vgpr1.
496 define <2 x i32> @v2i32_func_void() #0 {
497 ; CHECK-LABEL: name: v2i32_func_void
498 ; CHECK: bb.1 (%ir-block.0):
499 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
501 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
502 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
503 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load (<2 x s32>) from `<2 x i32> addrspace(1)* undef`, addrspace 1)
504 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
505 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
506 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
507 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
508 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
509 %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
; <3 x i32> return: load has align 16 on the memory operand; unmerged into three s32 elements for $vgpr0-$vgpr2.
513 define <3 x i32> @v3i32_func_void() #0 {
514 ; CHECK-LABEL: name: v3i32_func_void
515 ; CHECK: bb.1 (%ir-block.0):
516 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
518 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
519 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
520 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load (<3 x s32>) from `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
521 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
522 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
523 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
524 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
525 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
526 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
527 %val = load <3 x i32>, <3 x i32> addrspace(1)* undef
; <4 x i32> return: unmerged into four s32 elements for $vgpr0-$vgpr3.
531 define <4 x i32> @v4i32_func_void() #0 {
532 ; CHECK-LABEL: name: v4i32_func_void
533 ; CHECK: bb.1 (%ir-block.0):
534 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
536 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
537 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
538 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load (<4 x s32>) from `<4 x i32> addrspace(1)* undef`, addrspace 1)
539 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
540 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
541 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
542 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
543 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
544 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
545 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
546 %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
; <5 x i32> return: volatile load (align 32 on the memory operand) unmerged into five s32 elements for $vgpr0-$vgpr4.
550 define <5 x i32> @v5i32_func_void() #0 {
551 ; CHECK-LABEL: name: v5i32_func_void
552 ; CHECK: bb.1 (%ir-block.0):
553 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
555 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
556 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
557 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<5 x s32>) from `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
558 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
559 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
560 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
561 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
562 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
563 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
564 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
565 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
566 %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
; <8 x i32> return: pointer is first loaded (volatile) from addrspace(4), then the
; vector load is unmerged into eight s32 elements for $vgpr0-$vgpr7.
570 define <8 x i32> @v8i32_func_void() #0 {
571 ; CHECK-LABEL: name: v8i32_func_void
572 ; CHECK: bb.1 (%ir-block.0):
573 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
575 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
576 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
577 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
578 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s32>) from %ir.ptr, addrspace 1)
579 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
580 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
581 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
582 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
583 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
584 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
585 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
586 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
587 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
588 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
589 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
590 %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
591 %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
; <16 x i32> return: pointer is first loaded (volatile) from addrspace(4), then the
; vector load is unmerged into sixteen s32 elements for $vgpr0-$vgpr15.
595 define <16 x i32> @v16i32_func_void() #0 {
596 ; CHECK-LABEL: name: v16i32_func_void
597 ; CHECK: bb.1 (%ir-block.0):
598 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
600 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
601 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
602 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
603 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s32>) from %ir.ptr, addrspace 1)
604 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
605 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
606 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
607 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
608 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
609 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
610 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
611 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
612 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
613 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
614 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
615 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
616 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
617 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
618 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
619 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
620 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
621 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
622 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
623 %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
624 %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
628 define <32 x i32> @v32i32_func_void() #0 {
629 ; CHECK-LABEL: name: v32i32_func_void
630 ; CHECK: bb.1 (%ir-block.0):
631 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
633 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
634 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
635 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
636 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
637 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
638 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
639 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
640 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
641 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
642 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
643 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
644 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
645 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
646 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
647 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
648 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
649 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
650 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
651 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
652 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
653 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
654 ; CHECK-NEXT: $vgpr16 = COPY [[UV16]](s32)
655 ; CHECK-NEXT: $vgpr17 = COPY [[UV17]](s32)
656 ; CHECK-NEXT: $vgpr18 = COPY [[UV18]](s32)
657 ; CHECK-NEXT: $vgpr19 = COPY [[UV19]](s32)
658 ; CHECK-NEXT: $vgpr20 = COPY [[UV20]](s32)
659 ; CHECK-NEXT: $vgpr21 = COPY [[UV21]](s32)
660 ; CHECK-NEXT: $vgpr22 = COPY [[UV22]](s32)
661 ; CHECK-NEXT: $vgpr23 = COPY [[UV23]](s32)
662 ; CHECK-NEXT: $vgpr24 = COPY [[UV24]](s32)
663 ; CHECK-NEXT: $vgpr25 = COPY [[UV25]](s32)
664 ; CHECK-NEXT: $vgpr26 = COPY [[UV26]](s32)
665 ; CHECK-NEXT: $vgpr27 = COPY [[UV27]](s32)
666 ; CHECK-NEXT: $vgpr28 = COPY [[UV28]](s32)
667 ; CHECK-NEXT: $vgpr29 = COPY [[UV29]](s32)
668 ; CHECK-NEXT: $vgpr30 = COPY [[UV30]](s32)
669 ; CHECK-NEXT: $vgpr31 = COPY [[UV31]](s32)
670 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
671 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
672 %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
673 %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
677 define <2 x i64> @v2i64_func_void() #0 {
678 ; CHECK-LABEL: name: v2i64_func_void
679 ; CHECK: bb.1 (%ir-block.0):
680 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
682 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
683 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
684 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x i64> addrspace(1)* undef`, addrspace 1)
685 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
686 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
687 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
688 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
689 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
690 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
691 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
692 %val = load <2 x i64>, <2 x i64> addrspace(1)* undef
696 define <3 x i64> @v3i64_func_void() #0 {
697 ; CHECK-LABEL: name: v3i64_func_void
698 ; CHECK: bb.1 (%ir-block.0):
699 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
701 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
702 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
703 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<3 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
704 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<3 x s64>) from %ir.ptr, align 32, addrspace 1)
705 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s64>)
706 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
707 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
708 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
709 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
710 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
711 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
712 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
713 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
714 %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(4)* undef
715 %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
719 define <4 x i64> @v4i64_func_void() #0 {
720 ; CHECK-LABEL: name: v4i64_func_void
721 ; CHECK: bb.1 (%ir-block.0):
722 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
724 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
725 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
726 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
727 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s64>) from %ir.ptr, addrspace 1)
728 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s64>)
729 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
730 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
731 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
732 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
733 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
734 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
735 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
736 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
737 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
738 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
739 %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(4)* undef
740 %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
744 define <5 x i64> @v5i64_func_void() #0 {
745 ; CHECK-LABEL: name: v5i64_func_void
746 ; CHECK: bb.1 (%ir-block.0):
747 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
749 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
750 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
751 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
752 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s64>) from %ir.ptr, align 64, addrspace 1)
753 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<5 x s64>)
754 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
755 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
756 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
757 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
758 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
759 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
760 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
761 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
762 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
763 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
764 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
765 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
766 %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(4)* undef
767 %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
771 define <8 x i64> @v8i64_func_void() #0 {
772 ; CHECK-LABEL: name: v8i64_func_void
773 ; CHECK: bb.1 (%ir-block.0):
774 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
776 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
777 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
778 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
779 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s64>) from %ir.ptr, addrspace 1)
780 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s64>)
781 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
782 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
783 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
784 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
785 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
786 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
787 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
788 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
789 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
790 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
791 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
792 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
793 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
794 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
795 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
796 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
797 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
798 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
799 %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(4)* undef
800 %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
804 define <16 x i64> @v16i64_func_void() #0 {
805 ; CHECK-LABEL: name: v16i64_func_void
806 ; CHECK: bb.1 (%ir-block.0):
807 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
809 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
810 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
811 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
812 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s64>) from %ir.ptr, addrspace 1)
813 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s64>)
814 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
815 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
816 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
817 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
818 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
819 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
820 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
821 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
822 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
823 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
824 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
825 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
826 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
827 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
828 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
829 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
830 ; CHECK-NEXT: $vgpr16 = COPY [[UV16]](s32)
831 ; CHECK-NEXT: $vgpr17 = COPY [[UV17]](s32)
832 ; CHECK-NEXT: $vgpr18 = COPY [[UV18]](s32)
833 ; CHECK-NEXT: $vgpr19 = COPY [[UV19]](s32)
834 ; CHECK-NEXT: $vgpr20 = COPY [[UV20]](s32)
835 ; CHECK-NEXT: $vgpr21 = COPY [[UV21]](s32)
836 ; CHECK-NEXT: $vgpr22 = COPY [[UV22]](s32)
837 ; CHECK-NEXT: $vgpr23 = COPY [[UV23]](s32)
838 ; CHECK-NEXT: $vgpr24 = COPY [[UV24]](s32)
839 ; CHECK-NEXT: $vgpr25 = COPY [[UV25]](s32)
840 ; CHECK-NEXT: $vgpr26 = COPY [[UV26]](s32)
841 ; CHECK-NEXT: $vgpr27 = COPY [[UV27]](s32)
842 ; CHECK-NEXT: $vgpr28 = COPY [[UV28]](s32)
843 ; CHECK-NEXT: $vgpr29 = COPY [[UV29]](s32)
844 ; CHECK-NEXT: $vgpr30 = COPY [[UV30]](s32)
845 ; CHECK-NEXT: $vgpr31 = COPY [[UV31]](s32)
846 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
847 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
848 %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(4)* undef
849 %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
853 define <2 x i16> @v2i16_func_void() #0 {
854 ; CHECK-LABEL: name: v2i16_func_void
855 ; CHECK: bb.1 (%ir-block.0):
856 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
858 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
859 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
860 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x i16> addrspace(1)* undef`, addrspace 1)
861 ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](<2 x s16>)
862 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
863 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
864 %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
868 define <2 x half> @v2f16_func_void() #0 {
869 ; CHECK-LABEL: name: v2f16_func_void
870 ; CHECK: bb.1 (%ir-block.0):
871 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
873 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
874 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
875 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x half> addrspace(1)* undef`, addrspace 1)
876 ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](<2 x s16>)
877 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
878 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
879 %val = load <2 x half>, <2 x half> addrspace(1)* undef
883 define <3 x i16> @v3i16_func_void() #0 {
884 ; CHECK-LABEL: name: v3i16_func_void
885 ; CHECK: bb.1 (%ir-block.0):
886 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
888 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
889 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
890 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load (<3 x s16>) from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
891 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<3 x s16>)
892 ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
893 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[DEF1]](s16)
894 ; CHECK-NEXT: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s16>)
895 ; CHECK-NEXT: $vgpr0 = COPY [[UV3]](<2 x s16>)
896 ; CHECK-NEXT: $vgpr1 = COPY [[UV4]](<2 x s16>)
897 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
898 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
899 %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
903 define <4 x i16> @v4i16_func_void() #0 {
904 ; CHECK-LABEL: name: v4i16_func_void
905 ; CHECK: bb.1 (%ir-block.0):
906 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
908 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
909 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
910 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x i16> addrspace(1)* undef`, addrspace 1)
911 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
912 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
913 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
914 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
915 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
916 %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
920 define <4 x half> @v4f16_func_void() #0 {
921 ; CHECK-LABEL: name: v4f16_func_void
922 ; CHECK: bb.1 (%ir-block.0):
923 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
925 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
926 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
927 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x half> addrspace(1)* undef`, addrspace 1)
928 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
929 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
930 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
931 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
932 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
933 %val = load <4 x half>, <4 x half> addrspace(1)* undef
937 define <5 x i16> @v5i16_func_void() #0 {
938 ; CHECK-LABEL: name: v5i16_func_void
939 ; CHECK: bb.1 (%ir-block.0):
940 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
942 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
943 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
944 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
945 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s16>) from %ir.ptr, align 16, addrspace 1)
946 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<5 x s16>)
947 ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
948 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<6 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[UV3]](s16), [[UV4]](s16), [[DEF1]](s16)
949 ; CHECK-NEXT: [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<6 x s16>)
950 ; CHECK-NEXT: $vgpr0 = COPY [[UV5]](<2 x s16>)
951 ; CHECK-NEXT: $vgpr1 = COPY [[UV6]](<2 x s16>)
952 ; CHECK-NEXT: $vgpr2 = COPY [[UV7]](<2 x s16>)
953 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
954 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
955 %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(4)* undef
956 %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
960 define <8 x i16> @v8i16_func_void() #0 {
961 ; CHECK-LABEL: name: v8i16_func_void
962 ; CHECK: bb.1 (%ir-block.0):
963 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
965 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
966 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
967 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
968 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s16>) from %ir.ptr, addrspace 1)
969 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<8 x s16>)
970 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
971 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
972 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](<2 x s16>)
973 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](<2 x s16>)
974 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
975 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
976 %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(4)* undef
977 %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
981 define <16 x i16> @v16i16_func_void() #0 {
982 ; CHECK-LABEL: name: v16i16_func_void
983 ; CHECK: bb.1 (%ir-block.0):
984 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
986 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
987 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
988 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
989 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s16>) from %ir.ptr, addrspace 1)
990 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<16 x s16>)
991 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
992 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
993 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](<2 x s16>)
994 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](<2 x s16>)
995 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](<2 x s16>)
996 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](<2 x s16>)
997 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](<2 x s16>)
998 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](<2 x s16>)
999 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1000 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
1001 %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(4)* undef
1002 %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
1006 define <16 x i8> @v16i8_func_void() #0 {
1007 ; CHECK-LABEL: name: v16i8_func_void
1008 ; CHECK: bb.1 (%ir-block.0):
1009 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1010 ; CHECK-NEXT: {{ $}}
1011 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1012 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1013 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
1014 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s8>) from %ir.ptr, addrspace 1)
1015 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
1016 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1017 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1018 ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
1019 ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
1020 ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[UV4]](s8)
1021 ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[UV5]](s8)
1022 ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s16) = G_ANYEXT [[UV6]](s8)
1023 ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s16) = G_ANYEXT [[UV7]](s8)
1024 ; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s16) = G_ANYEXT [[UV8]](s8)
1025 ; CHECK-NEXT: [[ANYEXT9:%[0-9]+]]:_(s16) = G_ANYEXT [[UV9]](s8)
1026 ; CHECK-NEXT: [[ANYEXT10:%[0-9]+]]:_(s16) = G_ANYEXT [[UV10]](s8)
1027 ; CHECK-NEXT: [[ANYEXT11:%[0-9]+]]:_(s16) = G_ANYEXT [[UV11]](s8)
1028 ; CHECK-NEXT: [[ANYEXT12:%[0-9]+]]:_(s16) = G_ANYEXT [[UV12]](s8)
1029 ; CHECK-NEXT: [[ANYEXT13:%[0-9]+]]:_(s16) = G_ANYEXT [[UV13]](s8)
1030 ; CHECK-NEXT: [[ANYEXT14:%[0-9]+]]:_(s16) = G_ANYEXT [[UV14]](s8)
1031 ; CHECK-NEXT: [[ANYEXT15:%[0-9]+]]:_(s16) = G_ANYEXT [[UV15]](s8)
1032 ; CHECK-NEXT: [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1033 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT16]](s32)
1034 ; CHECK-NEXT: [[ANYEXT17:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1035 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT17]](s32)
1036 ; CHECK-NEXT: [[ANYEXT18:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
1037 ; CHECK-NEXT: $vgpr2 = COPY [[ANYEXT18]](s32)
1038 ; CHECK-NEXT: [[ANYEXT19:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
1039 ; CHECK-NEXT: $vgpr3 = COPY [[ANYEXT19]](s32)
1040 ; CHECK-NEXT: [[ANYEXT20:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT4]](s16)
1041 ; CHECK-NEXT: $vgpr4 = COPY [[ANYEXT20]](s32)
1042 ; CHECK-NEXT: [[ANYEXT21:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT5]](s16)
1043 ; CHECK-NEXT: $vgpr5 = COPY [[ANYEXT21]](s32)
1044 ; CHECK-NEXT: [[ANYEXT22:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT6]](s16)
1045 ; CHECK-NEXT: $vgpr6 = COPY [[ANYEXT22]](s32)
1046 ; CHECK-NEXT: [[ANYEXT23:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT7]](s16)
1047 ; CHECK-NEXT: $vgpr7 = COPY [[ANYEXT23]](s32)
1048 ; CHECK-NEXT: [[ANYEXT24:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT8]](s16)
1049 ; CHECK-NEXT: $vgpr8 = COPY [[ANYEXT24]](s32)
1050 ; CHECK-NEXT: [[ANYEXT25:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT9]](s16)
1051 ; CHECK-NEXT: $vgpr9 = COPY [[ANYEXT25]](s32)
1052 ; CHECK-NEXT: [[ANYEXT26:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT10]](s16)
1053 ; CHECK-NEXT: $vgpr10 = COPY [[ANYEXT26]](s32)
1054 ; CHECK-NEXT: [[ANYEXT27:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT11]](s16)
1055 ; CHECK-NEXT: $vgpr11 = COPY [[ANYEXT27]](s32)
1056 ; CHECK-NEXT: [[ANYEXT28:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT12]](s16)
1057 ; CHECK-NEXT: $vgpr12 = COPY [[ANYEXT28]](s32)
1058 ; CHECK-NEXT: [[ANYEXT29:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT13]](s16)
1059 ; CHECK-NEXT: $vgpr13 = COPY [[ANYEXT29]](s32)
1060 ; CHECK-NEXT: [[ANYEXT30:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT14]](s16)
1061 ; CHECK-NEXT: $vgpr14 = COPY [[ANYEXT30]](s32)
1062 ; CHECK-NEXT: [[ANYEXT31:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT15]](s16)
1063 ; CHECK-NEXT: $vgpr15 = COPY [[ANYEXT31]](s32)
1064 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1065 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
1066 %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
1067 %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
1071 define <2 x i8> @v2i8_func_void() #0 {
1072 ; CHECK-LABEL: name: v2i8_func_void
1073 ; CHECK: bb.1 (%ir-block.0):
1074 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1075 ; CHECK-NEXT: {{ $}}
1076 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1077 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1078 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[DEF]](p1) :: (load (<2 x s8>) from `<2 x i8> addrspace(1)* undef`, addrspace 1)
1079 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<2 x s8>)
1080 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1081 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1082 ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1083 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT2]](s32)
1084 ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1085 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT3]](s32)
1086 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1087 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
1088 %val = load <2 x i8>, <2 x i8> addrspace(1)* undef
1092 define <3 x i8> @v3i8_func_void() #0 {
1093 ; CHECK-LABEL: name: v3i8_func_void
1094 ; CHECK: bb.1 (%ir-block.0):
1095 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1096 ; CHECK-NEXT: {{ $}}
1097 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1098 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1099 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[DEF]](p1) :: (load (<3 x s8>) from `<3 x i8> addrspace(1)* undef`, align 4, addrspace 1)
1100 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<3 x s8>)
1101 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1102 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1103 ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
1104 ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1105 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT3]](s32)
1106 ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1107 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT4]](s32)
1108 ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
1109 ; CHECK-NEXT: $vgpr2 = COPY [[ANYEXT5]](s32)
1110 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1111 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
1112 %val = load <3 x i8>, <3 x i8> addrspace(1)* undef
1116 define <4 x i8> @v4i8_func_void() #0 {
1117 ; CHECK-LABEL: name: v4i8_func_void
1118 ; CHECK: bb.1 (%ir-block.0):
1119 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1120 ; CHECK-NEXT: {{ $}}
1121 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1122 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1123 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
1124 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s8>) from %ir.ptr, addrspace 1)
1125 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
1126 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
1127 ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
1128 ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
1129 ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
1130 ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
1131 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT4]](s32)
1132 ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
1133 ; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT5]](s32)
1134 ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
1135 ; CHECK-NEXT: $vgpr2 = COPY [[ANYEXT6]](s32)
1136 ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
1137 ; CHECK-NEXT: $vgpr3 = COPY [[ANYEXT7]](s32)
1138 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1139 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1140 %ptr = load volatile <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(4)* undef
1141 %val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
1145 define {i8, i32} @struct_i8_i32_func_void() #0 {
1146 ; CHECK-LABEL: name: struct_i8_i32_func_void
1147 ; CHECK: bb.1 (%ir-block.0):
1148 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1149 ; CHECK-NEXT: {{ $}}
1150 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1151 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1152 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
1153 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1154 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
1155 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
1156 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
1157 ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
1158 ; CHECK-NEXT: $vgpr1 = COPY [[LOAD1]](s32)
1159 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1160 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
1161 %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
1162 ret { i8, i32 } %val
1165 define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }) %arg0) #0 {
1166 ; CHECK-LABEL: name: void_func_sret_struct_i8_i32
1167 ; CHECK: bb.1 (%ir-block.0):
1168 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1169 ; CHECK-NEXT: {{ $}}
1170 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1171 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1172 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1173 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
1174 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
1175 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p1) :: (volatile load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
1176 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1177 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
1178 ; CHECK-NEXT: G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.gep01, addrspace 5)
1179 ; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
1180 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1181 ; CHECK-NEXT: S_SETPC_B64_return [[COPY3]]
1182 %val0 = load volatile i8, i8 addrspace(1)* undef
1183 %val1 = load volatile i32, i32 addrspace(1)* undef
1184 %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
1185 %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
1186 store i8 %val0, i8 addrspace(5)* %gep0
1187 store i32 %val1, i32 addrspace(5)* %gep1
; FIXME: Should be able to fold offsets in all of these pre-gfx9. Call
; lowering introduces an extra CopyToReg/CopyFromReg that obscures the
; inserted AssertZext. Not using the AssertZext introduces the spills.
1195 define <33 x i32> @v33i32_func_void() #0 {
1196 ; CHECK-LABEL: name: v33i32_func_void
1197 ; CHECK: bb.1 (%ir-block.0):
1198 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1199 ; CHECK-NEXT: {{ $}}
1200 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1201 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1202 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1203 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
1204 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<33 x s32>) from %ir.ptr, align 256, addrspace 1)
1205 ; CHECK-NEXT: G_STORE [[LOAD1]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
1206 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1207 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]]
1208 %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
1209 %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
1213 define <33 x i32> @v33i32_func_v33i32_i32(<33 x i32> addrspace(1)* %p, i32 %idx) #0 {
1214 ; CHECK-LABEL: name: v33i32_func_v33i32_i32
1215 ; CHECK: bb.1 (%ir-block.0):
1216 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
1217 ; CHECK-NEXT: {{ $}}
1218 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1219 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
1220 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
1221 ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
1222 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
1223 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1224 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32)
1225 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
1226 ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
1227 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64)
1228 ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
1229 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY5]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1)
1230 ; CHECK-NEXT: G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
1231 ; CHECK-NEXT: [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
1232 ; CHECK-NEXT: S_SETPC_B64_return [[COPY6]]
1233 %gep = getelementptr inbounds <33 x i32>, <33 x i32> addrspace(1)* %p, i32 %idx
1234 %val = load <33 x i32>, <33 x i32> addrspace(1)* %gep
1238 define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
1239 ; CHECK-LABEL: name: struct_v32i32_i32_func_void
1240 ; CHECK: bb.1 (%ir-block.0):
1241 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1242 ; CHECK-NEXT: {{ $}}
1243 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1244 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1245 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1246 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
1247 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
1248 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
1249 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
1250 ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr + 128, align 128, addrspace 1)
1251 ; CHECK-NEXT: G_STORE [[LOAD1]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
1252 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
1253 ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
1254 ; CHECK-NEXT: G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store (s32), align 128, addrspace 5)
1255 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1256 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]]
1257 %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
1258 %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
1259 ret { <32 x i32>, i32 }%val
1262 define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
1263 ; CHECK-LABEL: name: struct_i32_v32i32_func_void
1264 ; CHECK: bb.1 (%ir-block.0):
1265 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1266 ; CHECK-NEXT: {{ $}}
1267 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1268 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1269 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
1270 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
1271 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load (s32) from %ir.ptr, align 128, addrspace 1)
1272 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
1273 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
1274 ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<32 x s32>) from %ir.ptr + 128, addrspace 1)
1275 ; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[COPY]](p5) :: (store (s32), align 128, addrspace 5)
1276 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
1277 ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
1278 ; CHECK-NEXT: G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store (<32 x s32>), addrspace 5)
1279 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1280 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]]
1281 %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
1282 %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
1283 ret { i32, <32 x i32> }%val
1286 ; Make sure the last struct component is returned in v3, not v4.
1287 define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
1288 ; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
1289 ; CHECK: bb.1 (%ir-block.0):
1290 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1291 ; CHECK-NEXT: {{ $}}
1292 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1293 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
1294 ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
1295 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1296 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1297 ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1298 ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
1299 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
1300 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
1301 ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
1302 ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
1303 ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
1304 ; CHECK-NEXT: [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
1305 ; CHECK-NEXT: [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
1306 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
1307 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
1308 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
1309 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
1310 ; CHECK-NEXT: $vgpr3 = COPY [[LOAD3]](s32)
1311 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1312 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1313 %load0 = load volatile i32, i32 addrspace(3)* undef
1314 %load1 = load volatile i32, i32 addrspace(3)* undef
1315 %load2 = load volatile i32, i32 addrspace(3)* undef
1316 %load3 = load volatile i32, i32 addrspace(3)* undef
1318 %insert.0 = insertelement <3 x i32> undef, i32 %load0, i32 0
1319 %insert.1 = insertelement <3 x i32> %insert.0, i32 %load1, i32 1
1320 %insert.2 = insertelement <3 x i32> %insert.1, i32 %load2, i32 2
1321 %insert.3 = insertvalue { <3 x i32>, i32 } undef, <3 x i32> %insert.2, 0
1322 %insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
1323 ret { <3 x i32>, i32 } %insert.4
1326 define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
1327 ; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
1328 ; CHECK: bb.1 (%ir-block.0):
1329 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1330 ; CHECK-NEXT: {{ $}}
1331 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1332 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
1333 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
1334 ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
1335 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1336 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1337 ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1338 ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
1339 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
1340 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
1341 ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
1342 ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
1343 ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
1344 ; CHECK-NEXT: [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
1345 ; CHECK-NEXT: [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
1346 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
1347 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
1348 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
1349 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
1350 ; CHECK-NEXT: $vgpr3 = COPY [[LOAD3]](s32)
1351 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1352 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
1353 %load0 = load volatile float, float addrspace(3)* undef
1354 %load1 = load volatile float, float addrspace(3)* undef
1355 %load2 = load volatile float, float addrspace(3)* undef
1356 %load3 = load volatile i32, i32 addrspace(3)* undef
1358 %insert.0 = insertelement <3 x float> undef, float %load0, i32 0
1359 %insert.1 = insertelement <3 x float> %insert.0, float %load1, i32 1
1360 %insert.2 = insertelement <3 x float> %insert.1, float %load2, i32 2
1361 %insert.3 = insertvalue { <3 x float>, i32 } undef, <3 x float> %insert.2, 0
1362 %insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
1363 ret { <3 x float>, i32 } %insert.4
1366 define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret(i8) %arg0) #0 {
1367 ; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
1368 ; CHECK: bb.1 (%ir-block.0):
1369 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1370 ; CHECK-NEXT: {{ $}}
1371 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1372 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1373 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
1374 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
1375 ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
1376 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
1377 ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
1378 ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
1379 ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C1]](s32)
1380 ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C2]](s32)
1381 ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
1382 ; CHECK-NEXT: G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
1383 ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
1384 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1385 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]]
1386 %arg0.int = ptrtoint i8 addrspace(5)* %arg0 to i32
1388 %lshr0 = lshr i32 %arg0.int, 16
1389 %lshr1 = lshr i32 %arg0.int, 17
1390 %lshr2 = lshr i32 %arg0.int, 18
1392 store volatile i32 %lshr0, i32 addrspace(3)* undef
1393 store volatile i32 %lshr1, i32 addrspace(3)* undef
1394 store volatile i32 %lshr2, i32 addrspace(3)* undef
1398 define i1022 @i1022_func_void() #0 {
1399 ; CHECK-LABEL: name: i1022_func_void
1400 ; CHECK: bb.1 (%ir-block.0):
1401 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1402 ; CHECK-NEXT: {{ $}}
1403 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1404 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1405 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
1406 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[LOAD]](s1022)
1407 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s1024)
1408 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
1409 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
1410 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
1411 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
1412 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
1413 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
1414 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
1415 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
1416 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
1417 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
1418 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
1419 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
1420 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
1421 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
1422 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
1423 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
1424 ; CHECK-NEXT: $vgpr16 = COPY [[UV16]](s32)
1425 ; CHECK-NEXT: $vgpr17 = COPY [[UV17]](s32)
1426 ; CHECK-NEXT: $vgpr18 = COPY [[UV18]](s32)
1427 ; CHECK-NEXT: $vgpr19 = COPY [[UV19]](s32)
1428 ; CHECK-NEXT: $vgpr20 = COPY [[UV20]](s32)
1429 ; CHECK-NEXT: $vgpr21 = COPY [[UV21]](s32)
1430 ; CHECK-NEXT: $vgpr22 = COPY [[UV22]](s32)
1431 ; CHECK-NEXT: $vgpr23 = COPY [[UV23]](s32)
1432 ; CHECK-NEXT: $vgpr24 = COPY [[UV24]](s32)
1433 ; CHECK-NEXT: $vgpr25 = COPY [[UV25]](s32)
1434 ; CHECK-NEXT: $vgpr26 = COPY [[UV26]](s32)
1435 ; CHECK-NEXT: $vgpr27 = COPY [[UV27]](s32)
1436 ; CHECK-NEXT: $vgpr28 = COPY [[UV28]](s32)
1437 ; CHECK-NEXT: $vgpr29 = COPY [[UV29]](s32)
1438 ; CHECK-NEXT: $vgpr30 = COPY [[UV30]](s32)
1439 ; CHECK-NEXT: $vgpr31 = COPY [[UV31]](s32)
1440 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1441 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
1442 %val = load i1022, i1022 addrspace(1)* undef
1446 define signext i1022 @i1022_signext_func_void() #0 {
1447 ; CHECK-LABEL: name: i1022_signext_func_void
1448 ; CHECK: bb.1 (%ir-block.0):
1449 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1450 ; CHECK-NEXT: {{ $}}
1451 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1452 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1453 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
1454 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[LOAD]](s1022)
1455 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s1024)
1456 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
1457 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
1458 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
1459 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
1460 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
1461 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
1462 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
1463 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
1464 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
1465 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
1466 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
1467 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
1468 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
1469 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
1470 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
1471 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
1472 ; CHECK-NEXT: $vgpr16 = COPY [[UV16]](s32)
1473 ; CHECK-NEXT: $vgpr17 = COPY [[UV17]](s32)
1474 ; CHECK-NEXT: $vgpr18 = COPY [[UV18]](s32)
1475 ; CHECK-NEXT: $vgpr19 = COPY [[UV19]](s32)
1476 ; CHECK-NEXT: $vgpr20 = COPY [[UV20]](s32)
1477 ; CHECK-NEXT: $vgpr21 = COPY [[UV21]](s32)
1478 ; CHECK-NEXT: $vgpr22 = COPY [[UV22]](s32)
1479 ; CHECK-NEXT: $vgpr23 = COPY [[UV23]](s32)
1480 ; CHECK-NEXT: $vgpr24 = COPY [[UV24]](s32)
1481 ; CHECK-NEXT: $vgpr25 = COPY [[UV25]](s32)
1482 ; CHECK-NEXT: $vgpr26 = COPY [[UV26]](s32)
1483 ; CHECK-NEXT: $vgpr27 = COPY [[UV27]](s32)
1484 ; CHECK-NEXT: $vgpr28 = COPY [[UV28]](s32)
1485 ; CHECK-NEXT: $vgpr29 = COPY [[UV29]](s32)
1486 ; CHECK-NEXT: $vgpr30 = COPY [[UV30]](s32)
1487 ; CHECK-NEXT: $vgpr31 = COPY [[UV31]](s32)
1488 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1489 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
1490 %val = load i1022, i1022 addrspace(1)* undef
1494 define zeroext i1022 @i1022_zeroext_func_void() #0 {
1495 ; CHECK-LABEL: name: i1022_zeroext_func_void
1496 ; CHECK: bb.1 (%ir-block.0):
1497 ; CHECK-NEXT: liveins: $sgpr30_sgpr31
1498 ; CHECK-NEXT: {{ $}}
1499 ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1500 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1501 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
1502 ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s1024) = G_ZEXT [[LOAD]](s1022)
1503 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s1024)
1504 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
1505 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
1506 ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
1507 ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
1508 ; CHECK-NEXT: $vgpr4 = COPY [[UV4]](s32)
1509 ; CHECK-NEXT: $vgpr5 = COPY [[UV5]](s32)
1510 ; CHECK-NEXT: $vgpr6 = COPY [[UV6]](s32)
1511 ; CHECK-NEXT: $vgpr7 = COPY [[UV7]](s32)
1512 ; CHECK-NEXT: $vgpr8 = COPY [[UV8]](s32)
1513 ; CHECK-NEXT: $vgpr9 = COPY [[UV9]](s32)
1514 ; CHECK-NEXT: $vgpr10 = COPY [[UV10]](s32)
1515 ; CHECK-NEXT: $vgpr11 = COPY [[UV11]](s32)
1516 ; CHECK-NEXT: $vgpr12 = COPY [[UV12]](s32)
1517 ; CHECK-NEXT: $vgpr13 = COPY [[UV13]](s32)
1518 ; CHECK-NEXT: $vgpr14 = COPY [[UV14]](s32)
1519 ; CHECK-NEXT: $vgpr15 = COPY [[UV15]](s32)
1520 ; CHECK-NEXT: $vgpr16 = COPY [[UV16]](s32)
1521 ; CHECK-NEXT: $vgpr17 = COPY [[UV17]](s32)
1522 ; CHECK-NEXT: $vgpr18 = COPY [[UV18]](s32)
1523 ; CHECK-NEXT: $vgpr19 = COPY [[UV19]](s32)
1524 ; CHECK-NEXT: $vgpr20 = COPY [[UV20]](s32)
1525 ; CHECK-NEXT: $vgpr21 = COPY [[UV21]](s32)
1526 ; CHECK-NEXT: $vgpr22 = COPY [[UV22]](s32)
1527 ; CHECK-NEXT: $vgpr23 = COPY [[UV23]](s32)
1528 ; CHECK-NEXT: $vgpr24 = COPY [[UV24]](s32)
1529 ; CHECK-NEXT: $vgpr25 = COPY [[UV25]](s32)
1530 ; CHECK-NEXT: $vgpr26 = COPY [[UV26]](s32)
1531 ; CHECK-NEXT: $vgpr27 = COPY [[UV27]](s32)
1532 ; CHECK-NEXT: $vgpr28 = COPY [[UV28]](s32)
1533 ; CHECK-NEXT: $vgpr29 = COPY [[UV29]](s32)
1534 ; CHECK-NEXT: $vgpr30 = COPY [[UV30]](s32)
1535 ; CHECK-NEXT: $vgpr31 = COPY [[UV31]](s32)
1536 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
1537 ; CHECK-NEXT: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
1538 %val = load i1022, i1022 addrspace(1)* undef
1542 %struct.with.ptrs = type { <32 x i32>, i32 addrspace(3)*, i32 addrspace(1)*, <2 x i8 addrspace(1)*> }
1544 define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
1545 ; CHECK-LABEL: name: ptr_in_struct_func_void
1546 ; CHECK: bb.1 (%ir-block.0):
1547 ; CHECK-NEXT: liveins: $vgpr0, $sgpr30_sgpr31
1548 ; CHECK-NEXT: {{ $}}
1549 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
1550 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
1551 ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
1552 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<32 x s32>) from `%struct.with.ptrs addrspace(1)* undef`, addrspace 1)
1553 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
1554 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
1555 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p1) :: (volatile load (p3) from `%struct.with.ptrs addrspace(1)* undef` + 128, align 128, addrspace 1)
1556 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
1557 ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
1558 ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(p1) = G_LOAD [[PTR_ADD1]](p1) :: (volatile load (p1) from `%struct.with.ptrs addrspace(1)* undef` + 136, addrspace 1)
1559 ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
1560 ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
1561 ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[PTR_ADD2]](p1) :: (volatile load (<2 x p1>) from `%struct.with.ptrs addrspace(1)* undef` + 144, addrspace 1)
1562 ; CHECK-NEXT: G_STORE [[LOAD]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
1563 ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
1564 ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
1565 ; CHECK-NEXT: G_STORE [[LOAD1]](p3), [[PTR_ADD3]](p5) :: (store (p3), align 128, addrspace 5)
1566 ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
1567 ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
1568 ; CHECK-NEXT: G_STORE [[LOAD2]](p1), [[PTR_ADD4]](p5) :: (store (p1), addrspace 5)
1569 ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
1570 ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
1571 ; CHECK-NEXT: G_STORE [[LOAD3]](<2 x p1>), [[PTR_ADD5]](p5) :: (store (<2 x p1>), addrspace 5)
1572 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
1573 ; CHECK-NEXT: S_SETPC_B64_return [[COPY2]]
1574 %val = load volatile %struct.with.ptrs, %struct.with.ptrs addrspace(1)* undef
1575 ret %struct.with.ptrs %val
1578 attributes #0 = { nounwind }