; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
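
; These tests cover the masked vslide1down intrinsic under each tail/mask
; policy combination on riscv32, where the i64 scalar has no single GPR home
; and is inserted as two e32 vslide1down.vx steps at doubled VL.
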
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i32,
  i32)
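
; Intrinsic operands: merge (passthru), source vector, scalar, mask, vl, and a
; policy immediate. The policy immediates below are assumed to follow the usual
; RVV intrinsic encoding (bit 0 = tail agnostic, bit 1 = mask agnostic), i.e.
; tumu = 0, tamu = 1, tuma = 2, tama = 3.
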
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v9, v9, a0
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 0)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v9, v9, a0
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}

; Fall back to mask undisturbed for vslide1 until InsertVSETVLI supports mask agnostic.
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v9, v9, a0
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 2)

  ret <vscale x 1 x i64> %a
}

; Fall back to mask undisturbed for vslide1 until InsertVSETVLI supports mask agnostic.
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    i32 %3, i32 3)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> undef,
    i32 %2, i32 3)

  ret <vscale x 1 x i64> %a
}