# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s

define <vscale x 1 x i8> @vload_nxv1i8(ptr %pa) #0 {
  %va = load <vscale x 1 x i8>, ptr %pa, align 1
  ret <vscale x 1 x i8> %va
}

define <vscale x 2 x i8> @vload_nxv2i8(ptr %pa) #0 {
  %va = load <vscale x 2 x i8>, ptr %pa, align 2
  ret <vscale x 2 x i8> %va
}

define <vscale x 4 x i8> @vload_nxv4i8(ptr %pa) #0 {
  %va = load <vscale x 4 x i8>, ptr %pa, align 4
  ret <vscale x 4 x i8> %va
}

define <vscale x 8 x i8> @vload_nxv8i8(ptr %pa) #0 {
  %va = load <vscale x 8 x i8>, ptr %pa, align 8
  ret <vscale x 8 x i8> %va
}

define <vscale x 16 x i8> @vload_nxv16i8(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 16
  ret <vscale x 16 x i8> %va
}

define <vscale x 32 x i8> @vload_nxv32i8(ptr %pa) #0 {
  %va = load <vscale x 32 x i8>, ptr %pa, align 32
  ret <vscale x 32 x i8> %va
}

define <vscale x 64 x i8> @vload_nxv64i8(ptr %pa) #0 {
  %va = load <vscale x 64 x i8>, ptr %pa, align 64
  ret <vscale x 64 x i8> %va
}

define <vscale x 1 x i16> @vload_nxv1i16(ptr %pa) #0 {
  %va = load <vscale x 1 x i16>, ptr %pa, align 2
  ret <vscale x 1 x i16> %va
}

define <vscale x 2 x i16> @vload_nxv2i16(ptr %pa) #0 {
  %va = load <vscale x 2 x i16>, ptr %pa, align 4
  ret <vscale x 2 x i16> %va
}

define <vscale x 4 x i16> @vload_nxv4i16(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 8
  ret <vscale x 4 x i16> %va
}

define <vscale x 8 x i16> @vload_nxv8i16(ptr %pa) #0 {
  %va = load <vscale x 8 x i16>, ptr %pa, align 16
  ret <vscale x 8 x i16> %va
}

define <vscale x 16 x i16> @vload_nxv16i16(ptr %pa) #0 {
  %va = load <vscale x 16 x i16>, ptr %pa, align 32
  ret <vscale x 16 x i16> %va
}

define <vscale x 32 x i16> @vload_nxv32i16(ptr %pa) #0 {
  %va = load <vscale x 32 x i16>, ptr %pa, align 64
  ret <vscale x 32 x i16> %va
}

define <vscale x 1 x i32> @vload_nxv1i32(ptr %pa) #0 {
  %va = load <vscale x 1 x i32>, ptr %pa, align 4
  ret <vscale x 1 x i32> %va
}

define <vscale x 2 x i32> @vload_nxv2i32(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 8
  ret <vscale x 2 x i32> %va
}

define <vscale x 4 x i32> @vload_nxv4i32(ptr %pa) #0 {
  %va = load <vscale x 4 x i32>, ptr %pa, align 16
  ret <vscale x 4 x i32> %va
}

define <vscale x 8 x i32> @vload_nxv8i32(ptr %pa) #0 {
  %va = load <vscale x 8 x i32>, ptr %pa, align 32
  ret <vscale x 8 x i32> %va
}

define <vscale x 16 x i32> @vload_nxv16i32(ptr %pa) #0 {
  %va = load <vscale x 16 x i32>, ptr %pa, align 64
  ret <vscale x 16 x i32> %va
}

define <vscale x 1 x i64> @vload_nxv1i64(ptr %pa) #0 {
  %va = load <vscale x 1 x i64>, ptr %pa, align 8
  ret <vscale x 1 x i64> %va
}

define <vscale x 2 x i64> @vload_nxv2i64(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 16
  ret <vscale x 2 x i64> %va
}

define <vscale x 4 x i64> @vload_nxv4i64(ptr %pa) #0 {
  %va = load <vscale x 4 x i64>, ptr %pa, align 32
  ret <vscale x 4 x i64> %va
}

define <vscale x 8 x i64> @vload_nxv8i64(ptr %pa) #0 {
  %va = load <vscale x 8 x i64>, ptr %pa, align 64
  ret <vscale x 8 x i64> %va
}

define <vscale x 16 x i8> @vload_nxv16i8_align1(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 1
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nxv16i8_align2(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 2
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nxv16i8_align16(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 16
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nxv16i8_align64(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 64
  ret <vscale x 16 x i8> %va
}

define <vscale x 4 x i16> @vload_nxv4i16_align1(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 1
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nxv4i16_align2(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 2
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nxv4i16_align4(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 4
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nxv4i16_align8(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 8
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nxv4i16_align16(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 16
  ret <vscale x 4 x i16> %va
}

define <vscale x 2 x i32> @vload_nxv2i32_align2(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 2
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nxv2i32_align4(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 4
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nxv2i32_align8(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 8
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nxv2i32_align16(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 16
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nxv2i32_align256(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 256
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i64> @vload_nxv2i64_align4(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 4
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nxv2i64_align8(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 8
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nxv2i64_align16(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 16
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nxv2i64_align32(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 32
  ret <vscale x 2 x i64> %va
}

attributes #0 = { "target-features"="+v" }

; CHECK-LABEL: name: vload_nxv1i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv2i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s8>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv4i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s8>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv8i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 8 x s8>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv16i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2

; CHECK-LABEL: name: vload_nxv32i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 32 x s8>)
PseudoRET implicit $v8m4

; CHECK-LABEL: name: vload_nxv64i8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 64 x s8>)
PseudoRET implicit $v8m8

; CHECK-LABEL: name: vload_nxv1i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s16>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv2i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s16>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv4i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv8i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 8 x s16>)
PseudoRET implicit $v8m2

; CHECK-LABEL: name: vload_nxv16i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 16 x s16>)
PseudoRET implicit $v8m4

; CHECK-LABEL: name: vload_nxv32i16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 32 x s16>)
PseudoRET implicit $v8m8

; CHECK-LABEL: name: vload_nxv1i32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s32>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv2i32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv4i32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 4 x s32>)
PseudoRET implicit $v8m2

; CHECK-LABEL: name: vload_nxv8i32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x s32>)
PseudoRET implicit $v8m4

; CHECK-LABEL: name: vload_nxv16i32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 16 x s32>)
PseudoRET implicit $v8m8

; CHECK-LABEL: name: vload_nxv1i64
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8

; CHECK-LABEL: name: vload_nxv2i64
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2

; CHECK-LABEL: name: vload_nxv4i64
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4

; CHECK-LABEL: name: vload_nxv8i64
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8

name: vload_nxv16i8_align1
; CHECK-LABEL: name: vload_nxv16i8_align1
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2

name: vload_nxv16i8_align2
; CHECK-LABEL: name: vload_nxv16i8_align2
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2

name: vload_nxv16i8_align16
; CHECK-LABEL: name: vload_nxv16i8_align16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2

name: vload_nxv16i8_align64
; CHECK-LABEL: name: vload_nxv16i8_align64
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2

name: vload_nxv4i16_align1
; CHECK-LABEL: name: vload_nxv4i16_align1
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

name: vload_nxv4i16_align2
; CHECK-LABEL: name: vload_nxv4i16_align2
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

name: vload_nxv4i16_align4
; CHECK-LABEL: name: vload_nxv4i16_align4
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

name: vload_nxv4i16_align8
; CHECK-LABEL: name: vload_nxv4i16_align8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

name: vload_nxv4i16_align16
; CHECK-LABEL: name: vload_nxv4i16_align16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8

name: vload_nxv2i32_align2
; CHECK-LABEL: name: vload_nxv2i32_align2
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

name: vload_nxv2i32_align4
; CHECK-LABEL: name: vload_nxv2i32_align4
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

name: vload_nxv2i32_align8
; CHECK-LABEL: name: vload_nxv2i32_align8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

name: vload_nxv2i32_align16
; CHECK-LABEL: name: vload_nxv2i32_align16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

name: vload_nxv2i32_align256
; CHECK-LABEL: name: vload_nxv2i32_align256
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8

name: vload_nxv2i64_align4
; CHECK-LABEL: name: vload_nxv2i64_align4
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2

name: vload_nxv2i64_align8
; CHECK-LABEL: name: vload_nxv2i64_align8
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2

name: vload_nxv2i64_align16
; CHECK-LABEL: name: vload_nxv2i64_align16
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2

name: vload_nxv2i64_align32
; CHECK-LABEL: name: vload_nxv2i64_align32
; CHECK: liveins: $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2