# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
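#
# These tests check legalization of G_STORE of scalable vector values for the
# RISC-V V extension on both RV32 and RV64. Naturally aligned stores should be
# legal as-is; the *_align* tests at the bottom cover explicitly under- and
# over-aligned stores.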
--- |
  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
    store <vscale x 1 x i8> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
    store <vscale x 2 x i8> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
    store <vscale x 4 x i8> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
    store <vscale x 8 x i8> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
    store <vscale x 32 x i8> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
    store <vscale x 64 x i8> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
    store <vscale x 1 x i16> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
    store <vscale x 2 x i16> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
    store <vscale x 8 x i16> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
    store <vscale x 16 x i16> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
    store <vscale x 32 x i16> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
    store <vscale x 1 x i32> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
    store <vscale x 4 x i32> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
    store <vscale x 8 x i32> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
    store <vscale x 16 x i32> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
    store <vscale x 1 x i64> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
    store <vscale x 4 x i64> %b, ptr %pa, align 32
    ret void
  }

  define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
    store <vscale x 8 x i64> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
    store <vscale x 16 x i8> %b, ptr %pa, align 64
    ret void
  }

  define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 1
    ret void
  }

  define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
    store <vscale x 4 x i16> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 2
    ret void
  }

  define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
    store <vscale x 2 x i32> %b, ptr %pa, align 256
    ret void
  }

  define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 4
    ret void
  }

  define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 8
    ret void
  }

  define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 16
    ret void
  }

  define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
    store <vscale x 2 x i64> %b, ptr %pa, align 32
    ret void
  }
  attributes #0 = { "target-features"="+v" }

...
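# In every test body below, %0 copies the pointer argument from $x10 and %1
# copies the vector argument from $v8 (or from $v8m2/$v8m4/$v8m8 for types
# that occupy a register group) before the G_STORE.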
---
name: vstore_nx1i8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx1i8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s8>) = COPY $v8
    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s8>) = COPY $v8
    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx4i8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s8>) = COPY $v8
    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx8i8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx8i8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s8>) = COPY $v8
    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx16i8
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx16i8
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx32i8
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; CHECK-LABEL: name: vstore_nx32i8
    ; CHECK: liveins: $x10, $v8m4
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 32 x s8>) = COPY $v8m4
    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx64i8
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; CHECK-LABEL: name: vstore_nx64i8
    ; CHECK: liveins: $x10, $v8m8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 64 x s8>) = COPY $v8m8
    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx1i16
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx1i16
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s16>) = COPY $v8
    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i16
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i16
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s16>) = COPY $v8
    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx4i16
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx8i16
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx8i16
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s16>) = COPY $v8m2
    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx16i16
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; CHECK-LABEL: name: vstore_nx16i16
    ; CHECK: liveins: $x10, $v8m4
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s16>) = COPY $v8m4
    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx32i16
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; CHECK-LABEL: name: vstore_nx32i16
    ; CHECK: liveins: $x10, $v8m8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 32 x s16>) = COPY $v8m8
    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx1i32
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx1i32
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s32>) = COPY $v8
    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i32
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx4i32
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx4i32
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s32>) = COPY $v8m2
    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx8i32
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; CHECK-LABEL: name: vstore_nx8i32
    ; CHECK: liveins: $x10, $v8m4
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s32>) = COPY $v8m4
    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx16i32
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; CHECK-LABEL: name: vstore_nx16i32
    ; CHECK: liveins: $x10, $v8m8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s32>) = COPY $v8m8
    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx1i64
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx1i64
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 1 x s64>) = COPY $v8
    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i64
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx2i64
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx4i64
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m4

    ; CHECK-LABEL: name: vstore_nx4i64
    ; CHECK: liveins: $x10, $v8m4
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s64>) = COPY $v8m4
    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx8i64
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m8

    ; CHECK-LABEL: name: vstore_nx8i64
    ; CHECK: liveins: $x10, $v8m8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 8 x s64>) = COPY $v8m8
    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
    PseudoRET
...
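# The remaining tests give the store an explicit alignment. When the alignment
# is at least the element size, the G_STORE is already legal and only the MMO
# records the alignment; when it is smaller, the legalizer bitcasts the value
# to an equivalent <vscale x N x s8> vector and stores that instead.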
---
name: vstore_nx16i8_align1
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx16i8_align1
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
    PseudoRET
...
---
name: vstore_nx16i8_align2
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx16i8_align2
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
    PseudoRET
...
---
name: vstore_nx16i8_align16
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx16i8_align16
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx16i8_align64
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx16i8_align64
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 16 x s8>) = COPY $v8m2
    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
    PseudoRET
...
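# With s8 elements any alignment is at least the element size, so none of the
# nx16i8 cases above needed a bitcast. The i16/i32/i64 cases below do once the
# alignment drops below the element size.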
---
name: vstore_nx4i16_align1
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16_align1
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
    PseudoRET
...
---
name: vstore_nx4i16_align2
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16_align2
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
    PseudoRET
...
---
name: vstore_nx4i16_align4
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16_align4
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
    PseudoRET
...
---
name: vstore_nx4i16_align8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16_align8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx4i16_align16
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx4i16_align16
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 4 x s16>) = COPY $v8
    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
    PseudoRET
...
---
name: vstore_nx2i32_align2
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32_align2
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
    PseudoRET
...
---
name: vstore_nx2i32_align4
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32_align4
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
    PseudoRET
...
---
name: vstore_nx2i32_align8
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32_align8
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i32_align16
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32_align16
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
    PseudoRET
...
---
name: vstore_nx2i32_align256
body: |
  bb.1 (%ir-block.0):
    liveins: $v8, $x10

    ; CHECK-LABEL: name: vstore_nx2i32_align256
    ; CHECK: liveins: $v8, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s32>) = COPY $v8
    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
    PseudoRET
...
---
name: vstore_nx2i64_align4
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx2i64_align4
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
    PseudoRET
...
---
name: vstore_nx2i64_align8
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx2i64_align8
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
    PseudoRET
...
---
name: vstore_nx2i64_align16
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx2i64_align16
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
    PseudoRET
...
---
name: vstore_nx2i64_align32
body: |
  bb.1 (%ir-block.0):
    liveins: $x10, $v8m2

    ; CHECK-LABEL: name: vstore_nx2i64_align32
    ; CHECK: liveins: $x10, $v8m2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
    ; CHECK-NEXT: PseudoRET
    %0:_(p0) = COPY $x10
    %1:_(<vscale x 2 x s64>) = COPY $v8m2
    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
    PseudoRET
...