# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV64I %s
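# This file checks regbankselect's handling of scalable-vector G_STORE: the
# pointer operand is expected on the GPR bank (gprb) and the stored value on
# the vector bank (vrb), across element types s8-s64 and p0, LMUL register
# groupings ($v8, $v8m2, $v8m4, $v8m8), and a range of alignments.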
--- |
  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
    store <vscale x 1 x i8> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
    store <vscale x 2 x i8> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
    store <vscale x 4 x i8> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
    store <vscale x 8 x i8> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
    store <vscale x 32 x i8> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
    store <vscale x 64 x i8> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
    store <vscale x 1 x i16> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
    store <vscale x 2 x i16> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
    store <vscale x 8 x i16> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
    store <vscale x 16 x i16> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
    store <vscale x 32 x i16> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
    store <vscale x 1 x i32> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
    store <vscale x 4 x i32> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
    store <vscale x 8 x i32> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
    store <vscale x 16 x i32> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
    store <vscale x 1 x i64> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
    store <vscale x 4 x i64> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
    store <vscale x 8 x i64> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 256
    ret void
  }

  define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
    store <vscale x 1 x ptr> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
    store <vscale x 2 x ptr> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
    store <vscale x 8 x ptr> %b, ptr %pa, align 32
    ret void
  }

...
---
name:            vstore_nx1i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx1i8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx1i8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s8>) = COPY $v8
    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s8>) = COPY $v8
    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx4i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s8>) = COPY $v8
    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx8i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx8i8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx8i8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s8>) = COPY $v8
    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx16i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx16i8
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i8
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx32i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; RV32I-LABEL: name: vstore_nx32i8
    ; RV32I: liveins: $x10, $v8m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx32i8
    ; RV64I: liveins: $x10, $v8m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 32 x s8>) = COPY $v8m4
    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx64i8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; RV32I-LABEL: name: vstore_nx64i8
    ; RV32I: liveins: $x10, $v8m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx64i8
    ; RV64I: liveins: $x10, $v8m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 64 x s8>) = COPY $v8m8
    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx1i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx1i16
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx1i16
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s16>) = COPY $v8
    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i16
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i16
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s16>) = COPY $v8
    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx4i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx8i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx8i16
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx8i16
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s16>) = COPY $v8m2
    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx16i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; RV32I-LABEL: name: vstore_nx16i16
    ; RV32I: liveins: $x10, $v8m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i16
    ; RV64I: liveins: $x10, $v8m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s16>) = COPY $v8m4
    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx32i16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; RV32I-LABEL: name: vstore_nx32i16
    ; RV32I: liveins: $x10, $v8m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx32i16
    ; RV64I: liveins: $x10, $v8m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 32 x s16>) = COPY $v8m8
    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx1i32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx1i32
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx1i32
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s32>) = COPY $v8
    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx4i32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx4i32
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i32
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s32>) = COPY $v8m2
    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx8i32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; RV32I-LABEL: name: vstore_nx8i32
    ; RV32I: liveins: $x10, $v8m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx8i32
    ; RV64I: liveins: $x10, $v8m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s32>) = COPY $v8m4
    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx16i32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; RV32I-LABEL: name: vstore_nx16i32
    ; RV32I: liveins: $x10, $v8m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i32
    ; RV64I: liveins: $x10, $v8m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s32>) = COPY $v8m8
    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx1i64
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx1i64
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx1i64
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s64>) = COPY $v8
    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i64
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx2i64
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i64
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx4i64
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; RV32I-LABEL: name: vstore_nx4i64
    ; RV32I: liveins: $x10, $v8m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i64
    ; RV64I: liveins: $x10, $v8m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s64>) = COPY $v8m4
    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx8i64
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; RV32I-LABEL: name: vstore_nx8i64
    ; RV32I: liveins: $x10, $v8m8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx8i64
    ; RV64I: liveins: $x10, $v8m8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s64>) = COPY $v8m8
    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
    PseudoRET

...
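# The tests below repeat a subset of the types above with explicit source
# alignments. Underaligned stores (alignment below the element size, e.g.
# nx4i16 at align 1) arrive here already legalized to a G_BITCAST to a
# <vscale x N x s8> value, and the bitcast is likewise expected on vrb.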
---
name:            vstore_nx16i8_align1
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx16i8_align1
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i8_align1
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
    PseudoRET

...
---
name:            vstore_nx16i8_align2
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx16i8_align2
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i8_align2
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
    PseudoRET

...
---
name:            vstore_nx16i8_align16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx16i8_align16
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i8_align16
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx16i8_align64
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx16i8_align64
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx16i8_align64
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
    PseudoRET

...
---
name:            vstore_nx4i16_align1
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16_align1
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16_align1
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
    PseudoRET

...
---
name:            vstore_nx4i16_align2
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16_align2
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16_align2
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
    PseudoRET

...
---
name:            vstore_nx4i16_align4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16_align4
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16_align4
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
    PseudoRET

...
---
name:            vstore_nx4i16_align8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16_align8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16_align8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx4i16_align16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx4i16_align16
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx4i16_align16
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
    PseudoRET

...
---
name:            vstore_nx2i32_align2
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32_align2
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32_align2
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
    PseudoRET

...
---
name:            vstore_nx2i32_align4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32_align4
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32_align4
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
    PseudoRET

...
---
name:            vstore_nx2i32_align8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32_align8
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32_align8
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i32_align16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32_align16
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32_align16
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
    PseudoRET

...
---
name:            vstore_nx2i32_align256
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2i32_align256
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i32_align256
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
    PseudoRET

...
---
name:            vstore_nx2i64_align4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx2i64_align4
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i64_align4
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
    G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
    PseudoRET

...
---
name:            vstore_nx2i64_align8
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx2i64_align8
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i64_align8
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
    PseudoRET

...
---
name:            vstore_nx2i64_align16
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx2i64_align16
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i64_align16
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2i64_align32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; RV32I-LABEL: name: vstore_nx2i64_align32
    ; RV32I: liveins: $x10, $v8m2
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2i64_align32
    ; RV64I: liveins: $x10, $v8m2
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
    PseudoRET

...
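# The remaining tests store vectors of pointers (p0). The check lines are
# identical for RV32 and RV64 because they are written against the type p0
# rather than a fixed scalar size.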
---
name:            vstore_nx1ptr
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx1ptr
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx1ptr
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x p0>) = COPY $v8
    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx2ptr
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8

    ; RV32I-LABEL: name: vstore_nx2ptr
    ; RV32I: liveins: $v8, $x10
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx2ptr
    ; RV64I: liveins: $v8, $x10
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x p0>) = COPY $v8
    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
    PseudoRET

...
---
name:            vstore_nx8ptr
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; RV32I-LABEL: name: vstore_nx8ptr
    ; RV32I: liveins: $x10, $v8m4
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
    ; RV32I-NEXT: PseudoRET
    ;
    ; RV64I-LABEL: name: vstore_nx8ptr
    ; RV64I: liveins: $x10, $v8m4
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
    ; RV64I-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x p0>) = COPY $v8m4
    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
    PseudoRET

...