; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
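
; The tests below exercise the llvm.riscv.vsse (strided store) intrinsics,
; which lower to the RVV vsse8.v/vsse16.v/vsse32.v/vsse64.v instructions.
; Each call stores a scalable vector to memory with a constant byte stride
; between consecutive elements; the operands are the value to store, the
; base pointer, the stride, and the vector length, and the .mask variants
; additionally take a v0 mask.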

declare void @llvm.riscv.vsse.nxv1i64(<vscale x 1 x i64>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i64(<vscale x 1 x i64>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2i64(<vscale x 2 x i64>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i64(<vscale x 2 x i64>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4i64(<vscale x 4 x i64>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i64(<vscale x 4 x i64>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8i64(<vscale x 8 x i64>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i64(<vscale x 8 x i64>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1f64(<vscale x 1 x double>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f64(<vscale x 1 x double>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2f64(<vscale x 2 x double>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f64(<vscale x 2 x double>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4f64(<vscale x 4 x double>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f64(<vscale x 4 x double>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8f64(<vscale x 8 x double>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f64(<vscale x 8 x double>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1i32(<vscale x 1 x i32>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i32(<vscale x 1 x i32>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2i32(<vscale x 2 x i32>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i32(<vscale x 2 x i32>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4i32(<vscale x 4 x i32>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i32(<vscale x 4 x i32>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8i32(<vscale x 8 x i32>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i32(<vscale x 8 x i32>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16i32(<vscale x 16 x i32>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i32(<vscale x 16 x i32>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1f32(<vscale x 1 x float>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f32(<vscale x 1 x float>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2f32(<vscale x 2 x float>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f32(<vscale x 2 x float>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4f32(<vscale x 4 x float>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f32(<vscale x 4 x float>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8f32(<vscale x 8 x float>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f32(<vscale x 8 x float>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16f32(<vscale x 16 x float>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f32(<vscale x 16 x float>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1i16(<vscale x 1 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i16(<vscale x 1 x i16>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2i16(<vscale x 2 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i16(<vscale x 2 x i16>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4i16(<vscale x 4 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i16(<vscale x 4 x i16>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8i16(<vscale x 8 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i16(<vscale x 8 x i16>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16i16(<vscale x 16 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i16(<vscale x 16 x i16>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv32i16(<vscale x 32 x i16>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i16(<vscale x 32 x i16>, ptr, iXLen, <vscale x 32 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1f16(<vscale x 1 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1f16(<vscale x 1 x half>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2f16(<vscale x 2 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2f16(<vscale x 2 x half>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4f16(<vscale x 4 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4f16(<vscale x 4 x half>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8f16(<vscale x 8 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8f16(<vscale x 8 x half>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16f16(<vscale x 16 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16f16(<vscale x 16 x half>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv32f16(<vscale x 32 x half>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32f16(<vscale x 32 x half>, ptr, iXLen, <vscale x 32 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1bf16(<vscale x 1 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1bf16(<vscale x 1 x bfloat>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2bf16(<vscale x 2 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2bf16(<vscale x 2 x bfloat>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4bf16(<vscale x 4 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4bf16(<vscale x 4 x bfloat>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8bf16(<vscale x 8 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8bf16(<vscale x 8 x bfloat>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16bf16(<vscale x 16 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16bf16(<vscale x 16 x bfloat>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv32bf16(<vscale x 32 x bfloat>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32bf16(<vscale x 32 x bfloat>, ptr, iXLen, <vscale x 32 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv1i8(<vscale x 1 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i8(<vscale x 1 x i8>, ptr, iXLen, <vscale x 1 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv2i8(<vscale x 2 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv2i8(<vscale x 2 x i8>, ptr, iXLen, <vscale x 2 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv4i8(<vscale x 4 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv4i8(<vscale x 4 x i8>, ptr, iXLen, <vscale x 4 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv8i8(<vscale x 8 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv8i8(<vscale x 8 x i8>, ptr, iXLen, <vscale x 8 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv16i8(<vscale x 16 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv16i8(<vscale x 16 x i8>, ptr, iXLen, <vscale x 16 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv32i8(<vscale x 32 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv32i8(<vscale x 32 x i8>, ptr, iXLen, <vscale x 32 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4)
  ret void
}

declare void @llvm.riscv.vsse.nxv64i8(<vscale x 64 x i8>, ptr, iXLen, iXLen)

define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3)
  ret void
}

declare void @llvm.riscv.vsse.mask.nxv64i8(<vscale x 64 x i8>, ptr, iXLen, <vscale x 64 x i1>, iXLen)

define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsse.mask.nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4)
  ret void
}