; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen)

define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen)

define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen)

define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen)

define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen)

define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen)

define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen)

define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen)

define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen)

define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen)

define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen)

define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen)

define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen)

define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen)

define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}