; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; Slide the scalar %b into the last lane of %v: expands to a single vslide1down.vx.
define <2 x i8> @vslide1down_2xi8(<2 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1down_2xi8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x i8> poison, i8 %b, i64 0
  %v1 = shufflevector <2 x i8> %v, <2 x i8> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x i8> %v1
}
define <4 x i8> @vslide1down_4xi8(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1down_4xi8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x i8> poison, i8 %b, i64 0
  %v1 = shufflevector <4 x i8> %v, <4 x i8> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i8> %v1
}
; Same slide with the shuffle operands swapped; the mask is adjusted accordingly.
define <4 x i8> @vslide1down_4xi8_swapped(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1down_4xi8_swapped:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x i8> poison, i8 %b, i64 0
  %v1 = shufflevector <4 x i8> %vb, <4 x i8> %v, <4 x i32> <i32 5, i32 6, i32 7, i32 0>
  ret <4 x i8> %v1
}
define <2 x i16> @vslide1down_2xi16(<2 x i16> %v, i16 %b) {
; CHECK-LABEL: vslide1down_2xi16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x i16> poison, i16 %b, i64 0
  %v1 = shufflevector <2 x i16> %v, <2 x i16> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x i16> %v1
}
define <4 x i16> @vslide1down_4xi16(<4 x i16> %v, i16 %b) {
; CHECK-LABEL: vslide1down_4xi16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x i16> poison, i16 %b, i64 0
  %v1 = shufflevector <4 x i16> %v, <4 x i16> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i16> %v1
}
define <2 x i32> @vslide1down_2xi32(<2 x i32> %v, i32 %b) {
; CHECK-LABEL: vslide1down_2xi32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x i32> poison, i32 %b, i64 0
  %v1 = shufflevector <2 x i32> %v, <2 x i32> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x i32> %v1
}
define <4 x i32> @vslide1down_4xi32(<4 x i32> %v, i32 %b) {
; CHECK-LABEL: vslide1down_4xi32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x i32> poison, i32 %b, i64 0
  %v1 = shufflevector <4 x i32> %v, <4 x i32> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %v1
}
; i64 element on RV32 needs the scalar materialized via the stack (no 64-bit GPR),
; so the lowering differs per target and uses RV32/RV64 check prefixes.
define <2 x i64> @vslide1down_2xi64(<2 x i64> %v, i64 %b) {
; RV32-LABEL: vslide1down_2xi64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vslidedown.vi v8, v8, 1
; RV32-NEXT:    vslideup.vi v8, v9, 1
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vslide1down_2xi64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = insertelement <2 x i64> poison, i64 %b, i64 0
  %v1 = shufflevector <2 x i64> %v, <2 x i64> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x i64> %v1
}
define <4 x i64> @vslide1down_4xi64(<4 x i64> %v, i64 %b) {
; RV32-LABEL: vslide1down_4xi64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vslidedown.vi v8, v8, 1
; RV32-NEXT:    vslideup.vi v8, v10, 3
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vslide1down_4xi64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = insertelement <4 x i64> poison, i64 %b, i64 0
  %v1 = shufflevector <4 x i64> %v, <4 x i64> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i64> %v1
}
; bf16 has no vfslide1down; the scalar is moved to a GPR first (fmv.x.h).
define <2 x bfloat> @vslide1down_2xbf16(<2 x bfloat> %v, bfloat %b) {
; CHECK-LABEL: vslide1down_2xbf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x bfloat> poison, bfloat %b, i64 0
  %v1 = shufflevector <2 x bfloat> %v, <2 x bfloat> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x bfloat> %v1
}
define <4 x bfloat> @vslide1down_4xbf16(<4 x bfloat> %v, bfloat %b) {
; CHECK-LABEL: vslide1down_4xbf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x bfloat> poison, bfloat %b, i64 0
  %v1 = shufflevector <4 x bfloat> %v, <4 x bfloat> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x bfloat> %v1
}
; f16: ZVFH uses vfslide1down.vf directly; ZVFHMIN goes through a GPR like bf16.
define <2 x half> @vslide1down_2xf16(<2 x half> %v, half %b) {
; ZVFH-LABEL: vslide1down_2xf16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFH-NEXT:    vfslide1down.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vslide1down_2xf16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = insertelement <2 x half> poison, half %b, i64 0
  %v1 = shufflevector <2 x half> %v, <2 x half> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x half> %v1
}
define <4 x half> @vslide1down_4xf16(<4 x half> %v, half %b) {
; ZVFH-LABEL: vslide1down_4xf16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT:    vfslide1down.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vslide1down_4xf16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = insertelement <4 x half> poison, half %b, i64 0
  %v1 = shufflevector <4 x half> %v, <4 x half> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x half> %v1
}
define <2 x float> @vslide1down_2xf32(<2 x float> %v, float %b) {
; CHECK-LABEL: vslide1down_2xf32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x float> poison, float %b, i64 0
  %v1 = shufflevector <2 x float> %v, <2 x float> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x float> %v1
}
define <4 x float> @vslide1down_4xf32(<4 x float> %v, float %b) {
; CHECK-LABEL: vslide1down_4xf32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x float> poison, float %b, i64 0
  %v1 = shufflevector <4 x float> %v, <4 x float> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x float> %v1
}
define <2 x double> @vslide1down_2xf64(<2 x double> %v, double %b) {
; CHECK-LABEL: vslide1down_2xf64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %vb = insertelement <2 x double> poison, double %b, i64 0
  %v1 = shufflevector <2 x double> %v, <2 x double> %vb, <2 x i32> <i32 1, i32 2>
  ret <2 x double> %v1
}
define <4 x double> @vslide1down_4xf64(<4 x double> %v, double %b) {
; CHECK-LABEL: vslide1down_4xf64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x double> poison, double %b, i64 0
  %v1 = shufflevector <4 x double> %v, <4 x double> %vb, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  ret <4 x double> %v1
}
; The splat of %b feeding the shuffle still folds to a single vslide1down.vx.
define <4 x i8> @vslide1down_4xi8_with_splat(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1down_4xi8_with_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = insertelement <4 x i8> poison, i8 %b, i64 0
  %v1 = shufflevector <4 x i8> %vb, <4 x i8> poison, <4 x i32> zeroinitializer
  %v2 = shufflevector <4 x i8> %v1, <4 x i8> %v, <4 x i32> <i32 5, i32 6, i32 7, i32 1>
  ret <4 x i8> %v2
}
; Negative-ish case: the inverted pattern does not match vslide1down and lowers
; to a gather + slideup sequence instead.
define <2 x double> @vslide1down_v2f64_inverted(<2 x double> %v, double %b) {
; CHECK-LABEL: vslide1down_v2f64_inverted:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 0
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %v1 = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 0, i32 0>
  %v2 = insertelement <2 x double> %v1, double %b, i64 1
  ret <2 x double> %v2
}
268 define <4 x i8> @vslide1down_4xi8_inverted(<4 x i8> %v, i8 %b) {
269 ; CHECK-LABEL: vslide1down_4xi8_inverted:
271 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
272 ; CHECK-NEXT: vslideup.vi v9, v8, 1
273 ; CHECK-NEXT: vmv.s.x v8, a0
274 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
275 ; CHECK-NEXT: vslideup.vi v9, v8, 1
276 ; CHECK-NEXT: vmv1r.v v8, v9
278 %v1 = shufflevector <4 x i8> %v, <4 x i8> poison, <4 x i32> <i32 undef, i32 0, i32 1, i32 2>
279 %v2 = insertelement <4 x i8> %v1, i8 %b, i64 1