; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
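
; vmsbf.m (set-before-first) writes 1 to every active element before the
; first set bit of the source mask and 0 to that element and all following
; active elements. Each LMUL variant below tests the unmasked and masked
; intrinsic forms.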

declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}
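
; For the masked forms, v0 arrives holding the passthru operand, so it is
; copied aside (vmv1r.v v10, v0) and the mask moved into v0 before issuing
; vmsbf.m with v0.t.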

declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmsbf.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
    <vscale x 64 x i1> %0,
    iXLen %1)
  ret <vscale x 64 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmsbf.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)
  ret <vscale x 64 x i1> %a
}