; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
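
; These tests cover the vlm.v mask-load intrinsic for every scalable i1 mask
; type, from nxv1i1 (e8, mf8) up to nxv64i1 (e8, m8), on both riscv32 and
; riscv64.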
declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, iXLen);

define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, iXLen %1)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, iXLen);

define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, iXLen %1)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, iXLen);

define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, iXLen %1)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, iXLen);

define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, iXLen %1)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, iXLen);

define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, iXLen %1)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, iXLen);

define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, iXLen %1)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, iXLen);

define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, iXLen %1)
  ret <vscale x 64 x i1> %a
}