; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
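; This file checks that unmasked intrinsics that take a passthru (merge)
; operand are lowered with a tail-undisturbed ("tu") vsetvli policy. The sed
; invocations in the RUN lines substitute the iXLen placeholder with the
; target's XLEN integer type (i32 on riscv32, i64 on riscv64).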
declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vlse(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_tu:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, ptr %3) nounwind {
; RV32-LABEL: intrinsic_vleff_v_tu:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; RV32-NEXT: vle8ff.v v8, (a0)
; RV32-NEXT: csrr a0, vl
; RV32-NEXT: sw a0, 0(a2)
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vleff_v_tu:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; RV64-NEXT: vle8ff.v v8, (a0)
; RV64-NEXT: csrr a0, vl
; RV64-NEXT: sd a0, 0(a2)
; RV64-NEXT: ret
entry:
  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2)
  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
  store iXLen %c, ptr %3
  ret <vscale x 1 x i8> %b
}

declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT: vloxei8.v v8, (a0), v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

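; The fixed-point intrinsics below carry an explicit rounding-mode operand
; (iXLen 0 here), which is materialized as "csrwi vxrm, 0" ahead of the
; tu vsetvli.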
declare <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vaadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vaaddu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vand.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vasub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vasubu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vdiv.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vdivu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfdiv.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfmax.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfmin.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfrdiv.vf v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfsgnj.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfsgnjn.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfsgnjx.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfrsub.vf v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfslide1down.vf v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfwsub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfwsub.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl4re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT: vfwsub.wv v8, v16, v24
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x half> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfwmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfwadd.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfwadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfsub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

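; For the i64 scalar-operand tests below, RV32 has no 64-bit GPRs, so the
; scalar arrives as a register pair. vslide1down/vslide1up are then lowered
; as two SEW=32 slides with a doubled AVL, as the RV32 checks show.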
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vmv1r.v v10, v8
; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma
; RV32-NEXT: vslide1down.vx v10, v9, a0
; RV32-NEXT: vslide1down.vx v8, v10, a1
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vslide1down.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vmv1r.v v10, v8
; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma
; RV32-NEXT: vslide1up.vx v10, v9, a1
; RV32-NEXT: vslide1up.vx v8, v10, a0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vslide1up.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmax.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmaxu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmin.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vminu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmulh.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmulhsu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmulhu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnclip.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnclipu.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnsra.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnsrl.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vor.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vrem.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vrgather.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT: vrgather.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vrgatherei16.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

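; For the remaining vx tests with an i64 scalar, RV32 instead spills the
; register pair to the stack, splats it with a zero-strided vlse64.v, and
; falls back to the vector-vector form of the instruction.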
declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vsub.vv v8, v10, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vrsub.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vsadd.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vsadd.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsaddu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsll.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: csrwi vxrm, 0
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vsmul.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: csrwi vxrm, 0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vsmul.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsra.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsrl.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssra.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssrl.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssubu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vssub.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vssub.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vssubu.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vssubu.vx v8, v9, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwadd.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwaddu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwmulu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwmulsu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwsub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwsub.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

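; The _tied variant below passes the wide operand both as passthru and as
; the first source, so v8 serves as destination and source in vwsub.wv.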
define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwsub.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwsubu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vwsubu.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vxor.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT: vsext.vf8 v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT: vzext.vf8 v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT: vfncvt.x.f.w v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x float> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfclass.v v8, v9
; CHECK-NEXT: ret
  <vscale x 1 x i16> %0,
  <vscale x 1 x half> %1,
  iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
    <vscale x 1 x half> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vfcvt.xu.f.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

2052 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
2053 <vscale x 1 x half>,
2057 define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
2058 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
2059 ; CHECK: # %bb.0: # %entry
2060 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2061 ; CHECK-NEXT: vfncvt.f.x.w v8, v9
2064 %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
2065 <vscale x 1 x half> %0,
2066 <vscale x 1 x i32> %1,
2069 ret <vscale x 1 x half> %a
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.f.xu.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
    <vscale x 1 x half> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.x.f.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

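; Unary approximation and square-root ops also carry a passthru and keep tu.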
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrec7.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrsqrt7.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsqrt.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

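; Widening conversions run at the source SEW/LMUL (e16/mf4 or e8/mf8 here)
; while still selecting the tu policy for the passthru.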
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.x.f.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

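; viota.m writes through a passthru as well, so it is also tail-undisturbed.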
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

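; vadc/vsbc/vmerge consume v0 as a carry/selection operand rather than a mask,
; but the explicit passthru in %0 still requires the tu policy.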
declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadc.vvm v8, v9, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsbc.vvm v8, v9, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i8> %a
}

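; With an i64 scalar on RV32, the scalar is spilled to the stack and splatted
; with a zero-stride vlse64 so a vector-vector merge can be used; RV64 merges
; the scalar directly with vmerge.vxm.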
declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT:    vmerge.vvm v8, v16, v24, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; RV64-NEXT:    vmerge.vxm v8, v16, a0, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i64> %a
}

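; This "vim" immediate (2^36 - 1) does not fit vmerge.vim's 5-bit simm, so
; both targets materialize it in a register (or on the stack for RV32) first.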
define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    li a1, 15
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    li a1, -1
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    addi a1, sp, 8
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a1), zero
; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT:    vmerge.vvm v8, v16, v24, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    srli a1, a1, 28
; RV64-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; RV64-NEXT:    vmerge.vxm v8, v16, a1, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 68719476735,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}

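; vfmerge.vfm takes the scalar from an FP register, so no RV32/RV64 split is
; needed even for f64.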
declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vfmerge.vfm v8, v16, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x double> %a
}

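; A vector-vector vfmerge has no dedicated instruction and lowers to vmerge.vvm.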
declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x half> %a
}

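; Merging in +0.0 matches the integer zero bit pattern, so it can use
; vmerge.vim with immediate 0 instead of materializing an FP scalar.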
declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmerge.vim v8, v9, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half zeroinitializer,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

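; vmv.v.v/vmv.v.x/vfmv.v.f with a passthru also select the tu policy.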
declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

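; As with vmerge.vxm, RV32 splats the i64 through the stack with a zero-stride
; vlse64; here the tu policy is applied directly on the vlse64.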
declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}