# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV64I %s
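# These tests check register bank assignment for scalable vector loads: the
# pointer operand is assigned to the GPR bank (gprb) and the loaded vector
# value (including any bitcast of it) to the vector register bank (vrb) on
# both RV32 and RV64.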
define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
  %va = load <vscale x 1 x i8>, ptr %pa, align 1
  ret <vscale x 1 x i8> %va
}

define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
  %va = load <vscale x 2 x i8>, ptr %pa, align 2
  ret <vscale x 2 x i8> %va
}

define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
  %va = load <vscale x 4 x i8>, ptr %pa, align 4
  ret <vscale x 4 x i8> %va
}

define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
  %va = load <vscale x 8 x i8>, ptr %pa, align 8
  ret <vscale x 8 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 16
  ret <vscale x 16 x i8> %va
}

define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
  %va = load <vscale x 32 x i8>, ptr %pa, align 32
  ret <vscale x 32 x i8> %va
}

define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
  %va = load <vscale x 64 x i8>, ptr %pa, align 64
  ret <vscale x 64 x i8> %va
}

define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
  %va = load <vscale x 1 x i16>, ptr %pa, align 2
  ret <vscale x 1 x i16> %va
}

define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
  %va = load <vscale x 2 x i16>, ptr %pa, align 4
  ret <vscale x 2 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 8
  ret <vscale x 4 x i16> %va
}

define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
  %va = load <vscale x 8 x i16>, ptr %pa, align 16
  ret <vscale x 8 x i16> %va
}

define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
  %va = load <vscale x 16 x i16>, ptr %pa, align 32
  ret <vscale x 16 x i16> %va
}

define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
  %va = load <vscale x 32 x i16>, ptr %pa, align 64
  ret <vscale x 32 x i16> %va
}

define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
  %va = load <vscale x 1 x i32>, ptr %pa, align 4
  ret <vscale x 1 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 8
  ret <vscale x 2 x i32> %va
}

define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
  %va = load <vscale x 4 x i32>, ptr %pa, align 16
  ret <vscale x 4 x i32> %va
}

define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
  %va = load <vscale x 8 x i32>, ptr %pa, align 32
  ret <vscale x 8 x i32> %va
}

define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
  %va = load <vscale x 16 x i32>, ptr %pa, align 64
  ret <vscale x 16 x i32> %va
}

define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
  %va = load <vscale x 1 x i64>, ptr %pa, align 8
  ret <vscale x 1 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 16
  ret <vscale x 2 x i64> %va
}

define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
  %va = load <vscale x 4 x i64>, ptr %pa, align 32
  ret <vscale x 4 x i64> %va
}

define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
  %va = load <vscale x 8 x i64>, ptr %pa, align 64
  ret <vscale x 8 x i64> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 1
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 2
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 16
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
  %va = load <vscale x 16 x i8>, ptr %pa, align 64
  ret <vscale x 16 x i8> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 1
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 2
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 4
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 8
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
  %va = load <vscale x 4 x i16>, ptr %pa, align 16
  ret <vscale x 4 x i16> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 2
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 4
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 8
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 16
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
  %va = load <vscale x 2 x i32>, ptr %pa, align 256
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 4
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 8
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 16
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
  %va = load <vscale x 2 x i64>, ptr %pa, align 32
  ret <vscale x 2 x i64> %va
}

define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
  %va = load <vscale x 1 x ptr>, ptr %pa, align 4
  ret <vscale x 1 x ptr> %va
}

define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
  %va = load <vscale x 2 x ptr>, ptr %pa, align 8
  ret <vscale x 2 x ptr> %va
}

define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
  %va = load <vscale x 8 x ptr>, ptr %pa, align 32
  ret <vscale x 8 x ptr> %va
}
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx1i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx1i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s8>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s8>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx8i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx8i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 8 x s8>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx16i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx32i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m4
; RV64I-LABEL: name: vload_nx32i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx64i8
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m8
; RV64I-LABEL: name: vload_nx64i8
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx1i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx1i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s16>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s16>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx8i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx8i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m4
; RV64I-LABEL: name: vload_nx16i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx32i16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m8
; RV64I-LABEL: name: vload_nx32i16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx1i32
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx1i32
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s32>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i32
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx4i32
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx8i32
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m4
; RV64I-LABEL: name: vload_nx8i32
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i32
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m8
; RV64I-LABEL: name: vload_nx16i32
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx1i64
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx1i64
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i64
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx2i64
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i64
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m4
; RV64I-LABEL: name: vload_nx4i64
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx8i64
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m8
; RV64I-LABEL: name: vload_nx8i64
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
name: vload_nx16i8_align1
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i8_align1
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx16i8_align1
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
name: vload_nx16i8_align2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i8_align2
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx16i8_align2
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
name: vload_nx16i8_align16
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i8_align16
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx16i8_align16
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
name: vload_nx16i8_align64
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx16i8_align64
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx16i8_align64
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
name: vload_nx4i16_align1
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16_align1
; RV32I: liveins: $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16_align1
; RV64I: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
%1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
name: vload_nx4i16_align2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16_align2
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16_align2
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
name: vload_nx4i16_align4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
name: vload_nx4i16_align8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
name: vload_nx4i16_align16
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx4i16_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx4i16_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
name: vload_nx2i32_align2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32_align2
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32_align2
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
%1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
name: vload_nx2i32_align4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
name: vload_nx2i32_align8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
name: vload_nx2i32_align16
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
name: vload_nx2i32_align256
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i32_align256
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2i32_align256
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
name: vload_nx2i64_align4
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i64_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx2i64_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
%1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
name: vload_nx2i64_align8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i64_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx2i64_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
name: vload_nx2i64_align16
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i64_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx2i64_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
name: vload_nx2i64_align32
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2i64_align32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
; RV64I-LABEL: name: vload_nx2i64_align32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx1ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx1ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x p0>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx2ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
; RV64I-LABEL: name: vload_nx2ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x p0>)
PseudoRET implicit $v8
tracksRegLiveness: true
; RV32I-LABEL: name: vload_nx8ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8m4
; RV64I-LABEL: name: vload_nx8ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x p0>)
PseudoRET implicit $v8m4