; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
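
; The tests below exercise masked RVV intrinsics that take an explicit policy
; operand. Every call passes iXLen 2: in LLVM's policy encoding, bit 0 set
; means tail agnostic and bit 1 set means mask agnostic, so 2 selects the
; tail-undisturbed, mask-agnostic configuration checked as "tu, ma" in each
; vsetvli below. The sed pipelines above specialize the iXLen placeholder to
; the target's XLEN integer type before llc runs.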
declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)
  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 1 x i64> %b
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x iXLen>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x iXLen> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

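; Masked integer arithmetic. The merge operand (%0) must survive in inactive
; tail lanes, so the tu setting is required, while the iXLen 2 policy leaves
; masked-off lanes agnostic.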
declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

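; Fixed-point ops take a vxrm rounding-mode operand ahead of vl (e.g. iXLen 1
; below selects round-to-nearest-even); it is written with csrwi vxrm rather
; than being encoded in vsetvli.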
declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

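; Floating-point ops additionally carry an frm operand; iXLen 7 is the
; dynamic rounding mode (DYN), so no fsrmi write appears in the checks.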
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

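; vfclass and the conversions. Conversions that can round (vfcvt to/from
; integer and the narrowing vfncvt forms) carry the frm operand (iXLen 7);
; the always-exact widening conversions vfwcvt.f.x.v and vfwcvt.f.f.v do not.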
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i16> %0,
  <vscale x 1 x half> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
    <vscale x 1 x half> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

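; Permutation ops: the slides, gathers, and the vid/viota index generators
; below follow the same tu, ma pattern.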
declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 1 x i8> %a
}