; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
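
; These tests call each masked intrinsic with a policy operand of 3
; (tail agnostic, mask agnostic) and, in most cases, an undef passthru,
; so every vsetvli below is expected to carry the "ta, ma" policy.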

declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64>* %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i64> %a
}

declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT: vle64ff.v v8, (a0), v0.t
; RV32-NEXT: csrr a0, vl
; RV32-NEXT: sw a0, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vle64ff.v v8, (a0), v0.t
; RV64-NEXT: csrr a0, vl
; RV64-NEXT: sd a0, 0(a2)
; RV64-NEXT: ret
entry:
  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64>* %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
  store iXLen %c, iXLen* %3
  ret <vscale x 1 x i64> %b
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64>* %0,
    iXLen %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x iXLen>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);
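
; The sed in the RUN lines rewrites the iXLen in this function's name, so
; update_llc_test_checks.py cannot match it and no check lines are emitted.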
define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8>* %0, <vscale x 1 x iXLen> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8>* %0,
    <vscale x 1 x iXLen> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 -9,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vzext.vf4 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vzext.vf8 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsra.wv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmacc.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);
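
; The fixed-point intrinsics below (vaadd, vsmul, vssrl, vnclip) carry an
; extra rounding-mode operand, which the backend is expected to materialize
; with a csrwi to vxrm ahead of the arithmetic instruction.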
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 1, iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);
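
; The floating-point intrinsics carry a rounding-mode operand as well; the
; value 7 selects the dynamic (frm) rounding mode, so no explicit frm write
; is expected in the output.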
define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmacc.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrsqrt7.v v8, v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrec7.v v8, v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfclass.v v8, v8, v0.t
; CHECK-NEXT: ret
  <vscale x 1 x half> %0,
  <vscale x 1 x i1> %1,
  iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.xu.f.v v8, v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.xu.f.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.xu.f.w v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
    <vscale x 1 x half> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrgatherei16.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    iXLen %1, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);
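
; viota and vmsbf below take a mask vector as their data operand, which
; arrives in v0 while the actual mask operand arrives in v8; the two are
; swapped (via v9) before the masked instruction is emitted.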
define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: viota.m v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v9, v10
; CHECK-NEXT: vmfeq.vv v0, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
    <vscale x 1 x i1> undef,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %mask,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);
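
; On RV32 the i64 scalar operand arrives split across two GPRs, so it is
; stored to the stack and splatted with a zero-strided vlse64 before the
; compare; RV64 can use the .vx form directly.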
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmseq.vx v0, v8, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmslt.vx v8, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v8, v0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
    <vscale x 64 x i1> undef,
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    iXLen %2)
  ret <vscale x 64 x i1> %a
}