1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
5 ; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen)
14 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
15 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
16 ; CHECK: # %bb.0: # %entry
17 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
18 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
21 %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
28 ret <vscale x 1 x i8> %a
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
38 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
39 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
40 ; CHECK: # %bb.0: # %entry
41 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
42 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
45 %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
52 ret <vscale x 1 x i8> %a
55 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
56 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
57 ; CHECK: # %bb.0: # %entry
58 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
59 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
62 %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
69 ret <vscale x 1 x i8> %a
72 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
73 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8:
74 ; CHECK: # %bb.0: # %entry
75 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
76 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
79 %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
86 ret <vscale x 1 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  iXLen,
  iXLen)
96 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, iXLen %3) nounwind {
97 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
98 ; CHECK: # %bb.0: # %entry
99 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
100 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
103 %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
104 <vscale x 2 x i8> %0,
105 <vscale x 2 x i8> %1,
110 ret <vscale x 2 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
120 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
121 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
122 ; CHECK: # %bb.0: # %entry
123 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
124 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
127 %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
128 <vscale x 2 x i8> %0,
129 <vscale x 2 x i8> %1,
131 <vscale x 2 x i1> %3,
134 ret <vscale x 2 x i8> %a
137 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
138 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
139 ; CHECK: # %bb.0: # %entry
140 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
141 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
144 %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
145 <vscale x 2 x i8> %0,
146 <vscale x 2 x i8> %1,
151 ret <vscale x 2 x i8> %a
154 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
155 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8:
156 ; CHECK: # %bb.0: # %entry
157 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
158 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
161 %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
162 <vscale x 2 x i8> %0,
163 <vscale x 2 x i8> %1,
165 <vscale x 2 x i1> %2,
168 ret <vscale x 2 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  iXLen,
  iXLen)
178 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, iXLen %3) nounwind {
179 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
180 ; CHECK: # %bb.0: # %entry
181 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
182 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
185 %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
186 <vscale x 4 x i8> %0,
187 <vscale x 4 x i8> %1,
192 ret <vscale x 4 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
202 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
203 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
204 ; CHECK: # %bb.0: # %entry
205 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
206 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
209 %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
210 <vscale x 4 x i8> %0,
211 <vscale x 4 x i8> %1,
213 <vscale x 4 x i1> %3,
216 ret <vscale x 4 x i8> %a
219 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
220 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
221 ; CHECK: # %bb.0: # %entry
222 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
223 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
226 %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
227 <vscale x 4 x i8> %0,
228 <vscale x 4 x i8> %1,
233 ret <vscale x 4 x i8> %a
236 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
237 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8:
238 ; CHECK: # %bb.0: # %entry
239 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
240 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
243 %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
244 <vscale x 4 x i8> %0,
245 <vscale x 4 x i8> %1,
247 <vscale x 4 x i1> %2,
250 ret <vscale x 4 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen,
  iXLen)
260 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, iXLen %3) nounwind {
261 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
262 ; CHECK: # %bb.0: # %entry
263 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
264 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
267 %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
268 <vscale x 8 x i8> %0,
269 <vscale x 8 x i8> %1,
274 ret <vscale x 8 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)
284 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
285 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
286 ; CHECK: # %bb.0: # %entry
287 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
288 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
291 %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
292 <vscale x 8 x i8> %0,
293 <vscale x 8 x i8> %1,
295 <vscale x 8 x i1> %3,
298 ret <vscale x 8 x i8> %a
301 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
302 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
303 ; CHECK: # %bb.0: # %entry
304 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
305 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
308 %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
309 <vscale x 8 x i8> %0,
310 <vscale x 8 x i8> %1,
315 ret <vscale x 8 x i8> %a
318 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
319 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8:
320 ; CHECK: # %bb.0: # %entry
321 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
322 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
325 %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
326 <vscale x 8 x i8> %0,
327 <vscale x 8 x i8> %1,
329 <vscale x 8 x i1> %2,
332 ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen,
  iXLen)
342 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, iXLen %3) nounwind {
343 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
344 ; CHECK: # %bb.0: # %entry
345 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
346 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
349 %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
350 <vscale x 16 x i8> %0,
351 <vscale x 16 x i8> %1,
356 ret <vscale x 16 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)
366 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
367 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
368 ; CHECK: # %bb.0: # %entry
369 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
370 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
373 %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
374 <vscale x 16 x i8> %0,
375 <vscale x 16 x i8> %1,
377 <vscale x 16 x i1> %3,
380 ret <vscale x 16 x i8> %a
383 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
384 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
385 ; CHECK: # %bb.0: # %entry
386 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
387 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
390 %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
391 <vscale x 16 x i8> %0,
392 <vscale x 16 x i8> %1,
397 ret <vscale x 16 x i8> %a
400 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
401 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8:
402 ; CHECK: # %bb.0: # %entry
403 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
404 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
407 %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
408 <vscale x 16 x i8> %0,
409 <vscale x 16 x i8> %1,
411 <vscale x 16 x i1> %2,
414 ret <vscale x 16 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen,
  iXLen)
424 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, iXLen %3) nounwind {
425 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
426 ; CHECK: # %bb.0: # %entry
427 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
428 ; CHECK-NEXT: vslidedown.vx v8, v12, a0
431 %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
432 <vscale x 32 x i8> %0,
433 <vscale x 32 x i8> %1,
438 ret <vscale x 32 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)
448 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
449 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
450 ; CHECK: # %bb.0: # %entry
451 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
452 ; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
455 %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
456 <vscale x 32 x i8> %0,
457 <vscale x 32 x i8> %1,
459 <vscale x 32 x i1> %3,
462 ret <vscale x 32 x i8> %a
465 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
466 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
467 ; CHECK: # %bb.0: # %entry
468 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
469 ; CHECK-NEXT: vslidedown.vi v8, v12, 9
472 %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
473 <vscale x 32 x i8> %0,
474 <vscale x 32 x i8> %1,
479 ret <vscale x 32 x i8> %a
482 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
483 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8:
484 ; CHECK: # %bb.0: # %entry
485 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
486 ; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
489 %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
490 <vscale x 32 x i8> %0,
491 <vscale x 32 x i8> %1,
493 <vscale x 32 x i1> %2,
496 ret <vscale x 32 x i8> %a
declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen,
  iXLen)
506 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, iXLen %3) nounwind {
507 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
508 ; CHECK: # %bb.0: # %entry
509 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
510 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
513 %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
514 <vscale x 1 x i16> %0,
515 <vscale x 1 x i16> %1,
520 ret <vscale x 1 x i16> %a
declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
530 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
531 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
532 ; CHECK: # %bb.0: # %entry
533 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
534 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
537 %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
538 <vscale x 1 x i16> %0,
539 <vscale x 1 x i16> %1,
541 <vscale x 1 x i1> %3,
544 ret <vscale x 1 x i16> %a
547 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
548 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
549 ; CHECK: # %bb.0: # %entry
550 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
551 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
554 %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
555 <vscale x 1 x i16> %0,
556 <vscale x 1 x i16> %1,
561 ret <vscale x 1 x i16> %a
564 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
565 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16:
566 ; CHECK: # %bb.0: # %entry
567 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
568 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
571 %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
572 <vscale x 1 x i16> %0,
573 <vscale x 1 x i16> %1,
575 <vscale x 1 x i1> %2,
578 ret <vscale x 1 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen,
  iXLen)
588 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, iXLen %3) nounwind {
589 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
590 ; CHECK: # %bb.0: # %entry
591 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
592 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
595 %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
596 <vscale x 2 x i16> %0,
597 <vscale x 2 x i16> %1,
602 ret <vscale x 2 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
612 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
613 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
614 ; CHECK: # %bb.0: # %entry
615 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
616 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
619 %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
620 <vscale x 2 x i16> %0,
621 <vscale x 2 x i16> %1,
623 <vscale x 2 x i1> %3,
626 ret <vscale x 2 x i16> %a
629 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
630 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
631 ; CHECK: # %bb.0: # %entry
632 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
633 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
636 %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
637 <vscale x 2 x i16> %0,
638 <vscale x 2 x i16> %1,
643 ret <vscale x 2 x i16> %a
646 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
647 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16:
648 ; CHECK: # %bb.0: # %entry
649 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
650 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
653 %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
654 <vscale x 2 x i16> %0,
655 <vscale x 2 x i16> %1,
657 <vscale x 2 x i1> %2,
660 ret <vscale x 2 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen,
  iXLen)
670 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, iXLen %3) nounwind {
671 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
672 ; CHECK: # %bb.0: # %entry
673 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
674 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
677 %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
678 <vscale x 4 x i16> %0,
679 <vscale x 4 x i16> %1,
684 ret <vscale x 4 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
694 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
695 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
696 ; CHECK: # %bb.0: # %entry
697 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
698 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
701 %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
702 <vscale x 4 x i16> %0,
703 <vscale x 4 x i16> %1,
705 <vscale x 4 x i1> %3,
708 ret <vscale x 4 x i16> %a
711 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
712 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
713 ; CHECK: # %bb.0: # %entry
714 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
715 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
718 %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
719 <vscale x 4 x i16> %0,
720 <vscale x 4 x i16> %1,
725 ret <vscale x 4 x i16> %a
728 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
729 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16:
730 ; CHECK: # %bb.0: # %entry
731 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
732 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
735 %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
736 <vscale x 4 x i16> %0,
737 <vscale x 4 x i16> %1,
739 <vscale x 4 x i1> %2,
742 ret <vscale x 4 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen,
  iXLen)
752 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, iXLen %3) nounwind {
753 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
754 ; CHECK: # %bb.0: # %entry
755 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
756 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
759 %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
760 <vscale x 8 x i16> %0,
761 <vscale x 8 x i16> %1,
766 ret <vscale x 8 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)
776 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
777 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
778 ; CHECK: # %bb.0: # %entry
779 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
780 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
783 %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
784 <vscale x 8 x i16> %0,
785 <vscale x 8 x i16> %1,
787 <vscale x 8 x i1> %3,
790 ret <vscale x 8 x i16> %a
793 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
794 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
795 ; CHECK: # %bb.0: # %entry
796 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
797 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
800 %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
801 <vscale x 8 x i16> %0,
802 <vscale x 8 x i16> %1,
807 ret <vscale x 8 x i16> %a
810 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
811 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16:
812 ; CHECK: # %bb.0: # %entry
813 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
814 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
817 %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
818 <vscale x 8 x i16> %0,
819 <vscale x 8 x i16> %1,
821 <vscale x 8 x i1> %2,
824 ret <vscale x 8 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen,
  iXLen)
834 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, iXLen %3) nounwind {
835 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
836 ; CHECK: # %bb.0: # %entry
837 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
838 ; CHECK-NEXT: vslidedown.vx v8, v12, a0
841 %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
842 <vscale x 16 x i16> %0,
843 <vscale x 16 x i16> %1,
848 ret <vscale x 16 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)
858 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
859 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
860 ; CHECK: # %bb.0: # %entry
861 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
862 ; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
865 %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
866 <vscale x 16 x i16> %0,
867 <vscale x 16 x i16> %1,
869 <vscale x 16 x i1> %3,
872 ret <vscale x 16 x i16> %a
875 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
876 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
877 ; CHECK: # %bb.0: # %entry
878 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
879 ; CHECK-NEXT: vslidedown.vi v8, v12, 9
882 %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
883 <vscale x 16 x i16> %0,
884 <vscale x 16 x i16> %1,
889 ret <vscale x 16 x i16> %a
892 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
893 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16:
894 ; CHECK: # %bb.0: # %entry
895 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
896 ; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
899 %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
900 <vscale x 16 x i16> %0,
901 <vscale x 16 x i16> %1,
903 <vscale x 16 x i1> %2,
906 ret <vscale x 16 x i16> %a
declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen,
  iXLen)
916 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen %3) nounwind {
917 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
918 ; CHECK: # %bb.0: # %entry
919 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
920 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
923 %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
924 <vscale x 1 x i32> %0,
925 <vscale x 1 x i32> %1,
930 ret <vscale x 1 x i32> %a
declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
940 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
941 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
942 ; CHECK: # %bb.0: # %entry
943 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
944 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
947 %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
948 <vscale x 1 x i32> %0,
949 <vscale x 1 x i32> %1,
951 <vscale x 1 x i1> %3,
954 ret <vscale x 1 x i32> %a
957 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
958 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
959 ; CHECK: # %bb.0: # %entry
960 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
961 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
964 %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
965 <vscale x 1 x i32> %0,
966 <vscale x 1 x i32> %1,
971 ret <vscale x 1 x i32> %a
974 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
975 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32:
976 ; CHECK: # %bb.0: # %entry
977 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
978 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
981 %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
982 <vscale x 1 x i32> %0,
983 <vscale x 1 x i32> %1,
985 <vscale x 1 x i1> %2,
988 ret <vscale x 1 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen,
  iXLen)
998 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, iXLen %3) nounwind {
999 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
1000 ; CHECK: # %bb.0: # %entry
1001 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1002 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1005 %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
1006 <vscale x 2 x i32> %0,
1007 <vscale x 2 x i32> %1,
1012 ret <vscale x 2 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
1022 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1023 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
1024 ; CHECK: # %bb.0: # %entry
1025 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
1026 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1029 %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
1030 <vscale x 2 x i32> %0,
1031 <vscale x 2 x i32> %1,
1033 <vscale x 2 x i1> %3,
1036 ret <vscale x 2 x i32> %a
1039 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
1040 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
1041 ; CHECK: # %bb.0: # %entry
1042 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1043 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1046 %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
1047 <vscale x 2 x i32> %0,
1048 <vscale x 2 x i32> %1,
1053 ret <vscale x 2 x i32> %a
1056 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1057 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32:
1058 ; CHECK: # %bb.0: # %entry
1059 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
1060 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1063 %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
1064 <vscale x 2 x i32> %0,
1065 <vscale x 2 x i32> %1,
1067 <vscale x 2 x i1> %2,
1070 ret <vscale x 2 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen,
  iXLen)
1080 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, iXLen %3) nounwind {
1081 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
1082 ; CHECK: # %bb.0: # %entry
1083 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1084 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
1087 %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
1088 <vscale x 4 x i32> %0,
1089 <vscale x 4 x i32> %1,
1094 ret <vscale x 4 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
1104 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1105 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
1106 ; CHECK: # %bb.0: # %entry
1107 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
1108 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
1111 %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
1112 <vscale x 4 x i32> %0,
1113 <vscale x 4 x i32> %1,
1115 <vscale x 4 x i1> %3,
1118 ret <vscale x 4 x i32> %a
1121 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
1122 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1125 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
1128 %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
1129 <vscale x 4 x i32> %0,
1130 <vscale x 4 x i32> %1,
1135 ret <vscale x 4 x i32> %a
1138 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1139 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32:
1140 ; CHECK: # %bb.0: # %entry
1141 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
1142 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
1145 %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
1146 <vscale x 4 x i32> %0,
1147 <vscale x 4 x i32> %1,
1149 <vscale x 4 x i1> %2,
1152 ret <vscale x 4 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen,
  iXLen)
1162 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, iXLen %3) nounwind {
1163 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
1164 ; CHECK: # %bb.0: # %entry
1165 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1166 ; CHECK-NEXT: vslidedown.vx v8, v12, a0
1169 %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
1170 <vscale x 8 x i32> %0,
1171 <vscale x 8 x i32> %1,
1176 ret <vscale x 8 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)
1186 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1187 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
1188 ; CHECK: # %bb.0: # %entry
1189 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
1190 ; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
1193 %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
1194 <vscale x 8 x i32> %0,
1195 <vscale x 8 x i32> %1,
1197 <vscale x 8 x i1> %3,
1200 ret <vscale x 8 x i32> %a
1203 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
1204 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
1205 ; CHECK: # %bb.0: # %entry
1206 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1207 ; CHECK-NEXT: vslidedown.vi v8, v12, 9
1210 %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
1211 <vscale x 8 x i32> %0,
1212 <vscale x 8 x i32> %1,
1217 ret <vscale x 8 x i32> %a
1220 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1221 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32:
1222 ; CHECK: # %bb.0: # %entry
1223 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
1224 ; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
1227 %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
1228 <vscale x 8 x i32> %0,
1229 <vscale x 8 x i32> %1,
1231 <vscale x 8 x i1> %2,
1234 ret <vscale x 8 x i32> %a
declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen,
  iXLen)
1244 define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, iXLen %3) nounwind {
1245 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
1246 ; CHECK: # %bb.0: # %entry
1247 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1248 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1251 %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
1252 <vscale x 1 x i64> %0,
1253 <vscale x 1 x i64> %1,
1258 ret <vscale x 1 x i64> %a
declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
1268 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1269 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
1270 ; CHECK: # %bb.0: # %entry
1271 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
1272 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1275 %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
1276 <vscale x 1 x i64> %0,
1277 <vscale x 1 x i64> %1,
1279 <vscale x 1 x i1> %3,
1282 ret <vscale x 1 x i64> %a
1285 define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
1286 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
1287 ; CHECK: # %bb.0: # %entry
1288 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1289 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1292 %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
1293 <vscale x 1 x i64> %0,
1294 <vscale x 1 x i64> %1,
1299 ret <vscale x 1 x i64> %a
1302 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1303 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64:
1304 ; CHECK: # %bb.0: # %entry
1305 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
1306 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1309 %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
1310 <vscale x 1 x i64> %0,
1311 <vscale x 1 x i64> %1,
1313 <vscale x 1 x i1> %2,
1316 ret <vscale x 1 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen,
  iXLen)
1326 define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, iXLen %3) nounwind {
1327 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
1328 ; CHECK: # %bb.0: # %entry
1329 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1330 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
1333 %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
1334 <vscale x 2 x i64> %0,
1335 <vscale x 2 x i64> %1,
1340 ret <vscale x 2 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
1350 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1351 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
1352 ; CHECK: # %bb.0: # %entry
1353 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
1354 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
1357 %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
1358 <vscale x 2 x i64> %0,
1359 <vscale x 2 x i64> %1,
1361 <vscale x 2 x i1> %3,
1364 ret <vscale x 2 x i64> %a
1367 define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
1368 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
1369 ; CHECK: # %bb.0: # %entry
1370 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1371 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
1374 %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
1375 <vscale x 2 x i64> %0,
1376 <vscale x 2 x i64> %1,
1381 ret <vscale x 2 x i64> %a
1384 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1385 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64:
1386 ; CHECK: # %bb.0: # %entry
1387 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
1388 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
1391 %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
1392 <vscale x 2 x i64> %0,
1393 <vscale x 2 x i64> %1,
1395 <vscale x 2 x i1> %2,
1398 ret <vscale x 2 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen,
  iXLen)
1408 define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, iXLen %3) nounwind {
1409 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
1410 ; CHECK: # %bb.0: # %entry
1411 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1412 ; CHECK-NEXT: vslidedown.vx v8, v12, a0
1415 %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
1416 <vscale x 4 x i64> %0,
1417 <vscale x 4 x i64> %1,
1422 ret <vscale x 4 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
1432 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1433 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
1434 ; CHECK: # %bb.0: # %entry
1435 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
1436 ; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
1439 %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
1440 <vscale x 4 x i64> %0,
1441 <vscale x 4 x i64> %1,
1443 <vscale x 4 x i1> %3,
1446 ret <vscale x 4 x i64> %a
1449 define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
1450 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
1451 ; CHECK: # %bb.0: # %entry
1452 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1453 ; CHECK-NEXT: vslidedown.vi v8, v12, 9
1456 %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
1457 <vscale x 4 x i64> %0,
1458 <vscale x 4 x i64> %1,
1463 ret <vscale x 4 x i64> %a
1466 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1467 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64:
1468 ; CHECK: # %bb.0: # %entry
1469 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
1470 ; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
1473 %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
1474 <vscale x 4 x i64> %0,
1475 <vscale x 4 x i64> %1,
1477 <vscale x 4 x i1> %2,
1480 ret <vscale x 4 x i64> %a
declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  iXLen,
  iXLen)
1490 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, iXLen %3) nounwind {
1491 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
1492 ; CHECK: # %bb.0: # %entry
1493 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1494 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1497 %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
1498 <vscale x 1 x half> %0,
1499 <vscale x 1 x half> %1,
1504 ret <vscale x 1 x half> %a
declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
1514 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1515 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
1516 ; CHECK: # %bb.0: # %entry
1517 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
1518 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1521 %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
1522 <vscale x 1 x half> %0,
1523 <vscale x 1 x half> %1,
1525 <vscale x 1 x i1> %3,
1528 ret <vscale x 1 x half> %a
1531 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
1532 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
1533 ; CHECK: # %bb.0: # %entry
1534 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1535 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1538 %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
1539 <vscale x 1 x half> %0,
1540 <vscale x 1 x half> %1,
1545 ret <vscale x 1 x half> %a
1548 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1549 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16:
1550 ; CHECK: # %bb.0: # %entry
1551 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
1552 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1555 %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
1556 <vscale x 1 x half> %0,
1557 <vscale x 1 x half> %1,
1559 <vscale x 1 x i1> %2,
1562 ret <vscale x 1 x half> %a
declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  iXLen,
  iXLen)
1572 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, iXLen %3) nounwind {
1573 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
1574 ; CHECK: # %bb.0: # %entry
1575 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1576 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1579 %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
1580 <vscale x 2 x half> %0,
1581 <vscale x 2 x half> %1,
1586 ret <vscale x 2 x half> %a
declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
1596 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1597 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
1598 ; CHECK: # %bb.0: # %entry
1599 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
1600 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1603 %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
1604 <vscale x 2 x half> %0,
1605 <vscale x 2 x half> %1,
1607 <vscale x 2 x i1> %3,
1610 ret <vscale x 2 x half> %a
1613 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
1614 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
1615 ; CHECK: # %bb.0: # %entry
1616 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1617 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1620 %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
1621 <vscale x 2 x half> %0,
1622 <vscale x 2 x half> %1,
1627 ret <vscale x 2 x half> %a
1630 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1631 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16:
1632 ; CHECK: # %bb.0: # %entry
1633 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
1634 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1637 %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
1638 <vscale x 2 x half> %0,
1639 <vscale x 2 x half> %1,
1641 <vscale x 2 x i1> %2,
1644 ret <vscale x 2 x half> %a
declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  iXLen,
  iXLen)
1654 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, iXLen %3) nounwind {
1655 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
1656 ; CHECK: # %bb.0: # %entry
1657 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1658 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1661 %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
1662 <vscale x 4 x half> %0,
1663 <vscale x 4 x half> %1,
1668 ret <vscale x 4 x half> %a
declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
1678 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1679 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
1680 ; CHECK: # %bb.0: # %entry
1681 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
1682 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1685 %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
1686 <vscale x 4 x half> %0,
1687 <vscale x 4 x half> %1,
1689 <vscale x 4 x i1> %3,
1692 ret <vscale x 4 x half> %a
1695 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
1696 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
1697 ; CHECK: # %bb.0: # %entry
1698 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1699 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1702 %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
1703 <vscale x 4 x half> %0,
1704 <vscale x 4 x half> %1,
1709 ret <vscale x 4 x half> %a
1712 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1713 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16:
1714 ; CHECK: # %bb.0: # %entry
1715 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
1716 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1719 %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
1720 <vscale x 4 x half> %0,
1721 <vscale x 4 x half> %1,
1723 <vscale x 4 x i1> %2,
1726 ret <vscale x 4 x half> %a
declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  iXLen,
  iXLen)
1736 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, iXLen %3) nounwind {
1737 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
1738 ; CHECK: # %bb.0: # %entry
1739 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1740 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
1743 %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
1744 <vscale x 8 x half> %0,
1745 <vscale x 8 x half> %1,
1750 ret <vscale x 8 x half> %a
declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)
1760 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1761 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
1762 ; CHECK: # %bb.0: # %entry
1763 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
1764 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
1767 %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
1768 <vscale x 8 x half> %0,
1769 <vscale x 8 x half> %1,
1771 <vscale x 8 x i1> %3,
1774 ret <vscale x 8 x half> %a
1777 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
1778 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
1779 ; CHECK: # %bb.0: # %entry
1780 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1781 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
1784 %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
1785 <vscale x 8 x half> %0,
1786 <vscale x 8 x half> %1,
1791 ret <vscale x 8 x half> %a
1794 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1795 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16:
1796 ; CHECK: # %bb.0: # %entry
1797 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
1798 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
1801 %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
1802 <vscale x 8 x half> %0,
1803 <vscale x 8 x half> %1,
1805 <vscale x 8 x i1> %2,
1808 ret <vscale x 8 x half> %a
declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  iXLen,
  iXLen)
1818 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, iXLen %3) nounwind {
1819 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
1820 ; CHECK: # %bb.0: # %entry
1821 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1822 ; CHECK-NEXT: vslidedown.vx v8, v12, a0
1825 %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
1826 <vscale x 16 x half> %0,
1827 <vscale x 16 x half> %1,
1832 ret <vscale x 16 x half> %a
declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)
1842 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1843 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
1844 ; CHECK: # %bb.0: # %entry
1845 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
1846 ; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
1849 %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
1850 <vscale x 16 x half> %0,
1851 <vscale x 16 x half> %1,
1853 <vscale x 16 x i1> %3,
1856 ret <vscale x 16 x half> %a
1859 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
1860 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
1861 ; CHECK: # %bb.0: # %entry
1862 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1863 ; CHECK-NEXT: vslidedown.vi v8, v12, 9
1866 %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
1867 <vscale x 16 x half> %0,
1868 <vscale x 16 x half> %1,
1873 ret <vscale x 16 x half> %a
1876 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1877 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16:
1878 ; CHECK: # %bb.0: # %entry
1879 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
1880 ; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
1883 %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
1884 <vscale x 16 x half> %0,
1885 <vscale x 16 x half> %1,
1887 <vscale x 16 x i1> %2,
1890 ret <vscale x 16 x half> %a
declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  iXLen,
  iXLen)
1900 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, iXLen %3) nounwind {
1901 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
1902 ; CHECK: # %bb.0: # %entry
1903 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1904 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1907 %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
1908 <vscale x 1 x float> %0,
1909 <vscale x 1 x float> %1,
1914 ret <vscale x 1 x float> %a
declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
1924 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1925 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
1926 ; CHECK: # %bb.0: # %entry
1927 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
1928 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
1931 %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
1932 <vscale x 1 x float> %0,
1933 <vscale x 1 x float> %1,
1935 <vscale x 1 x i1> %3,
1938 ret <vscale x 1 x float> %a
1941 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
1942 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
1943 ; CHECK: # %bb.0: # %entry
1944 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1945 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
1948 %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
1949 <vscale x 1 x float> %0,
1950 <vscale x 1 x float> %1,
1955 ret <vscale x 1 x float> %a
1958 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1959 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32:
1960 ; CHECK: # %bb.0: # %entry
1961 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
1962 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
1965 %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
1966 <vscale x 1 x float> %0,
1967 <vscale x 1 x float> %1,
1969 <vscale x 1 x i1> %2,
1972 ret <vscale x 1 x float> %a
declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  iXLen,
  iXLen)
1982 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, iXLen %3) nounwind {
1983 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
1984 ; CHECK: # %bb.0: # %entry
1985 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1986 ; CHECK-NEXT: vslidedown.vx v8, v9, a0
1989 %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
1990 <vscale x 2 x float> %0,
1991 <vscale x 2 x float> %1,
1996 ret <vscale x 2 x float> %a
declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)
2006 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2007 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
2008 ; CHECK: # %bb.0: # %entry
2009 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
2010 ; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
2013 %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
2014 <vscale x 2 x float> %0,
2015 <vscale x 2 x float> %1,
2017 <vscale x 2 x i1> %3,
2020 ret <vscale x 2 x float> %a
2023 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
2024 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
2025 ; CHECK: # %bb.0: # %entry
2026 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2027 ; CHECK-NEXT: vslidedown.vi v8, v9, 9
2030 %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
2031 <vscale x 2 x float> %0,
2032 <vscale x 2 x float> %1,
2037 ret <vscale x 2 x float> %a
2040 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2041 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32:
2042 ; CHECK: # %bb.0: # %entry
2043 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
2044 ; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
2047 %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
2048 <vscale x 2 x float> %0,
2049 <vscale x 2 x float> %1,
2051 <vscale x 2 x i1> %2,
2054 ret <vscale x 2 x float> %a
declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  iXLen,
  iXLen)
2064 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, iXLen %3) nounwind {
2065 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
2066 ; CHECK: # %bb.0: # %entry
2067 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2068 ; CHECK-NEXT: vslidedown.vx v8, v10, a0
2071 %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
2072 <vscale x 4 x float> %0,
2073 <vscale x 4 x float> %1,
2078 ret <vscale x 4 x float> %a
declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)
2088 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2089 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
2090 ; CHECK: # %bb.0: # %entry
2091 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
2092 ; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
2095 %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
2096 <vscale x 4 x float> %0,
2097 <vscale x 4 x float> %1,
2099 <vscale x 4 x i1> %3,
2102 ret <vscale x 4 x float> %a
2105 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
2106 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
2107 ; CHECK: # %bb.0: # %entry
2108 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2109 ; CHECK-NEXT: vslidedown.vi v8, v10, 9
2112 %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
2113 <vscale x 4 x float> %0,
2114 <vscale x 4 x float> %1,
2119 ret <vscale x 4 x float> %a
2122 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2123 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32:
2124 ; CHECK: # %bb.0: # %entry
2125 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
2126 ; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
2129 %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
2130 <vscale x 4 x float> %0,
2131 <vscale x 4 x float> %1,
2133 <vscale x 4 x i1> %2,
2136 ret <vscale x 4 x float> %a
declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 0)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 9,
    iXLen %2,
    iXLen 1)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}