; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen)

define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen)

define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen)

define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfslide1up.vf v10, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen)

define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfslide1up.vf v12, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen)

define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfslide1up.vf v16, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen)

define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen)

define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen)

define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfslide1up.vf v10, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen)

define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfslide1up.vf v12, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen)

define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfslide1up.vf v16, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen)

define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen)

define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfslide1up.vf v10, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen)

define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfslide1up.vf v12, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen)

define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfslide1up.vf v16, v8, fa0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}