1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
3 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
5 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
7 define <2 x i8> @vslide1up_2xi8(<2 x i8> %v, i8 %b) {
8 ; CHECK-LABEL: vslide1up_2xi8:
10 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
11 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
12 ; CHECK-NEXT: vmv1r.v v8, v9
14 %vb = insertelement <2 x i8> poison, i8 %b, i64 0
15 %v1 = shufflevector <2 x i8> %v, <2 x i8> %vb, <2 x i32> <i32 2, i32 0>
19 define <4 x i8> @vslide1up_4xi8(<4 x i8> %v, i8 %b) {
20 ; CHECK-LABEL: vslide1up_4xi8:
22 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
23 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
24 ; CHECK-NEXT: vmv1r.v v8, v9
26 %vb = insertelement <4 x i8> poison, i8 %b, i64 0
27 %v1 = shufflevector <4 x i8> %v, <4 x i8> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
31 define <4 x i8> @vslide1up_4xi8_swapped(<4 x i8> %v, i8 %b) {
32 ; CHECK-LABEL: vslide1up_4xi8_swapped:
34 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
35 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
36 ; CHECK-NEXT: vmv1r.v v8, v9
38 %vb = insertelement <4 x i8> poison, i8 %b, i64 0
39 %v1 = shufflevector <4 x i8> %vb, <4 x i8> %v, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
43 define <2 x i16> @vslide1up_2xi16(<2 x i16> %v, i16 %b) {
44 ; CHECK-LABEL: vslide1up_2xi16:
46 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
47 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
48 ; CHECK-NEXT: vmv1r.v v8, v9
50 %vb = insertelement <2 x i16> poison, i16 %b, i64 0
51 %v1 = shufflevector <2 x i16> %v, <2 x i16> %vb, <2 x i32> <i32 2, i32 0>
55 define <4 x i16> @vslide1up_4xi16(<4 x i16> %v, i16 %b) {
56 ; CHECK-LABEL: vslide1up_4xi16:
58 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
59 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
60 ; CHECK-NEXT: vmv1r.v v8, v9
62 %vb = insertelement <4 x i16> poison, i16 %b, i64 0
63 %v1 = shufflevector <4 x i16> %v, <4 x i16> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
67 define <2 x i32> @vslide1up_2xi32(<2 x i32> %v, i32 %b) {
68 ; CHECK-LABEL: vslide1up_2xi32:
70 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
71 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
72 ; CHECK-NEXT: vmv1r.v v8, v9
74 %vb = insertelement <2 x i32> poison, i32 %b, i64 0
75 %v1 = shufflevector <2 x i32> %v, <2 x i32> %vb, <2 x i32> <i32 2, i32 0>
79 define <4 x i32> @vslide1up_4xi32(<4 x i32> %v, i32 %b) {
80 ; CHECK-LABEL: vslide1up_4xi32:
82 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
83 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
84 ; CHECK-NEXT: vmv.v.v v8, v9
86 %vb = insertelement <4 x i32> poison, i32 %b, i64 0
87 %v1 = shufflevector <4 x i32> %v, <4 x i32> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
91 define <2 x i64> @vslide1up_2xi64(<2 x i64> %v, i64 %b) {
92 ; RV32-LABEL: vslide1up_2xi64:
94 ; RV32-NEXT: addi sp, sp, -16
95 ; RV32-NEXT: .cfi_def_cfa_offset 16
96 ; RV32-NEXT: sw a1, 12(sp)
97 ; RV32-NEXT: sw a0, 8(sp)
98 ; RV32-NEXT: addi a0, sp, 8
99 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
100 ; RV32-NEXT: vlse64.v v9, (a0), zero
101 ; RV32-NEXT: vslideup.vi v9, v8, 1
102 ; RV32-NEXT: vmv.v.v v8, v9
103 ; RV32-NEXT: addi sp, sp, 16
106 ; RV64-LABEL: vslide1up_2xi64:
108 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
109 ; RV64-NEXT: vslide1up.vx v9, v8, a0
110 ; RV64-NEXT: vmv.v.v v8, v9
112 %vb = insertelement <2 x i64> poison, i64 %b, i64 0
113 %v1 = shufflevector <2 x i64> %v, <2 x i64> %vb, <2 x i32> <i32 2, i32 0>
117 define <4 x i64> @vslide1up_4xi64(<4 x i64> %v, i64 %b) {
118 ; RV32-LABEL: vslide1up_4xi64:
120 ; RV32-NEXT: addi sp, sp, -16
121 ; RV32-NEXT: .cfi_def_cfa_offset 16
122 ; RV32-NEXT: sw a1, 12(sp)
123 ; RV32-NEXT: sw a0, 8(sp)
124 ; RV32-NEXT: addi a0, sp, 8
125 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
126 ; RV32-NEXT: vlse64.v v10, (a0), zero
127 ; RV32-NEXT: vslideup.vi v10, v8, 1
128 ; RV32-NEXT: vmv.v.v v8, v10
129 ; RV32-NEXT: addi sp, sp, 16
132 ; RV64-LABEL: vslide1up_4xi64:
134 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
135 ; RV64-NEXT: vslide1up.vx v10, v8, a0
136 ; RV64-NEXT: vmv.v.v v8, v10
138 %vb = insertelement <4 x i64> poison, i64 %b, i64 0
139 %v1 = shufflevector <4 x i64> %v, <4 x i64> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
143 define <2 x half> @vslide1up_2xf16(<2 x half> %v, half %b) {
144 ; CHECK-LABEL: vslide1up_2xf16:
146 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
147 ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
148 ; CHECK-NEXT: vmv1r.v v8, v9
150 %vb = insertelement <2 x half> poison, half %b, i64 0
151 %v1 = shufflevector <2 x half> %v, <2 x half> %vb, <2 x i32> <i32 2, i32 0>
155 define <4 x half> @vslide1up_4xf16(<4 x half> %v, half %b) {
156 ; CHECK-LABEL: vslide1up_4xf16:
158 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
159 ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
160 ; CHECK-NEXT: vmv1r.v v8, v9
162 %vb = insertelement <4 x half> poison, half %b, i64 0
163 %v1 = shufflevector <4 x half> %v, <4 x half> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
167 define <2 x float> @vslide1up_2xf32(<2 x float> %v, float %b) {
168 ; CHECK-LABEL: vslide1up_2xf32:
170 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
171 ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
172 ; CHECK-NEXT: vmv1r.v v8, v9
174 %vb = insertelement <2 x float> poison, float %b, i64 0
175 %v1 = shufflevector <2 x float> %v, <2 x float> %vb, <2 x i32> <i32 2, i32 0>
179 define <4 x float> @vslide1up_4xf32(<4 x float> %v, float %b) {
180 ; CHECK-LABEL: vslide1up_4xf32:
182 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
183 ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
184 ; CHECK-NEXT: vmv.v.v v8, v9
186 %vb = insertelement <4 x float> poison, float %b, i64 0
187 %v1 = shufflevector <4 x float> %v, <4 x float> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
191 define <2 x double> @vslide1up_2xf64(<2 x double> %v, double %b) {
192 ; CHECK-LABEL: vslide1up_2xf64:
194 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
195 ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
196 ; CHECK-NEXT: vmv.v.v v8, v9
198 %vb = insertelement <2 x double> poison, double %b, i64 0
199 %v1 = shufflevector <2 x double> %v, <2 x double> %vb, <2 x i32> <i32 2, i32 0>
203 define <4 x double> @vslide1up_4xf64(<4 x double> %v, double %b) {
204 ; CHECK-LABEL: vslide1up_4xf64:
206 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
207 ; CHECK-NEXT: vfmv.v.f v10, fa0
208 ; CHECK-NEXT: vslideup.vi v10, v8, 3
209 ; CHECK-NEXT: vmv.v.v v8, v10
211 %vb = insertelement <4 x double> poison, double %b, i64 0
212 %v1 = shufflevector <4 x double> %v, <4 x double> %vb, <4 x i32> <i32 4, i32 5, i32 6, i32 0>
216 define <4 x i8> @vslide1up_4xi8_with_splat(<4 x i8> %v, i8 %b) {
217 ; CHECK-LABEL: vslide1up_4xi8_with_splat:
219 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
220 ; CHECK-NEXT: vslide1up.vx v9, v8, a0
221 ; CHECK-NEXT: vmv1r.v v8, v9
223 %vb = insertelement <4 x i8> poison, i8 %b, i64 0
224 %v1 = shufflevector <4 x i8> %vb, <4 x i8> poison, <4 x i32> zeroinitializer
225 %v2 = shufflevector <4 x i8> %v1, <4 x i8> %v, <4 x i32> <i32 1, i32 4, i32 5, i32 6>
229 define <2 x double> @vslide1up_v2f64_inverted(<2 x double> %v, double %b) {
230 ; CHECK-LABEL: vslide1up_v2f64_inverted:
232 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
233 ; CHECK-NEXT: vrgather.vi v9, v8, 0
234 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
235 ; CHECK-NEXT: vfmv.s.f v9, fa0
236 ; CHECK-NEXT: vmv1r.v v8, v9
238 %v1 = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 0, i32 0>
239 %v2 = insertelement <2 x double> %v1, double %b, i64 0
243 define <4 x i8> @vslide1up_4xi8_inverted(<4 x i8> %v, i8 %b) {
244 ; CHECK-LABEL: vslide1up_4xi8_inverted:
246 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
247 ; CHECK-NEXT: vslideup.vi v9, v8, 1
248 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
249 ; CHECK-NEXT: vmv.s.x v9, a0
250 ; CHECK-NEXT: vmv1r.v v8, v9
252 %v1 = shufflevector <4 x i8> %v, <4 x i8> poison, <4 x i32> <i32 undef, i32 0, i32 1, i32 2>
253 %v2 = insertelement <4 x i8> %v1, i8 %b, i64 0
257 define <2 x double> @vslide1up_2xf64_as_rotate(<2 x double> %v, double %b) {
258 ; CHECK-LABEL: vslide1up_2xf64_as_rotate:
260 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
261 ; CHECK-NEXT: vfmv.s.f v9, fa0
262 ; CHECK-NEXT: vslideup.vi v8, v9, 1
263 ; CHECK-NEXT: vslidedown.vi v9, v8, 1
264 ; CHECK-NEXT: vslideup.vi v9, v8, 1
265 ; CHECK-NEXT: vmv.v.v v8, v9
267 %v1 = insertelement <2 x double> %v, double %b, i64 1
268 %v2 = shufflevector <2 x double> %v1, <2 x double> poison, <2 x i32> <i32 1, i32 0>
272 define <4 x i8> @vslide1up_4xi8_as_rotate(<4 x i8> %v, i8 %b) {
273 ; CHECK-LABEL: vslide1up_4xi8_as_rotate:
275 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
276 ; CHECK-NEXT: vmv.s.x v9, a0
277 ; CHECK-NEXT: vslideup.vi v8, v9, 3
278 ; CHECK-NEXT: vslidedown.vi v9, v8, 3
279 ; CHECK-NEXT: vslideup.vi v9, v8, 1
280 ; CHECK-NEXT: vmv1r.v v8, v9
282 %v1 = insertelement <4 x i8> %v, i8 %b, i64 3
283 %v2 = shufflevector <4 x i8> %v1, <4 x i8> poison, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
287 ; Since the length of the shift is less than the suffix, we'd have to
288 ; materialize the splat; using the vslide1up doesn't help us.
289 define <4 x i32> @vslide1up_4xi32_neg1(<4 x i32> %v, i32 %b) {
290 ; CHECK-LABEL: vslide1up_4xi32_neg1:
292 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
293 ; CHECK-NEXT: vmv.v.x v9, a0
294 ; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
295 ; CHECK-NEXT: vslideup.vi v9, v8, 1
296 ; CHECK-NEXT: vmv1r.v v8, v9
298 %vb = insertelement <4 x i32> poison, i32 %b, i64 0
299 %vb2 = insertelement <4 x i32> %vb, i32 %b, i64 3
300 %v1 = shufflevector <4 x i32> %v, <4 x i32> %vb2, <4 x i32> <i32 4, i32 0, i32 1, i32 7>
304 ; We don't know the scalar value, so we can't use a vslide1up.
305 define <4 x i32> @vslide1up_4xi32_neg2(<4 x i32> %v1, <4 x i32> %v2) {
306 ; CHECK-LABEL: vslide1up_4xi32_neg2:
308 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
309 ; CHECK-NEXT: vslideup.vi v9, v8, 1
310 ; CHECK-NEXT: vmv.v.v v8, v9
312 %res = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
316 ; Not profitable - can just use a slideup instead
317 define <4 x i8> @vslide1up_4xi8_neg_undef_insert(<4 x i8> %v, i8 %b) {
318 ; CHECK-LABEL: vslide1up_4xi8_neg_undef_insert:
320 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
321 ; CHECK-NEXT: vslideup.vi v9, v8, 1
322 ; CHECK-NEXT: vmv1r.v v8, v9
324 %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
328 define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert(<4 x i8> %v, i8 %b) {
329 ; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert:
331 ; CHECK-NEXT: lui a0, 8208
332 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
333 ; CHECK-NEXT: vmv.s.x v10, a0
334 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
335 ; CHECK-NEXT: vrgather.vv v9, v8, v10
336 ; CHECK-NEXT: vmv1r.v v8, v9
338 %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 4, i32 4, i32 5, i32 6>
342 define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert2(<4 x i8> %v, i8 %b) {
343 ; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert2:
345 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
346 ; CHECK-NEXT: vslidedown.vi v9, v8, 3
347 ; CHECK-NEXT: vslideup.vi v9, v8, 1
348 ; CHECK-NEXT: vmv1r.v v8, v9
350 %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 7, i32 4, i32 5, i32 6>
354 define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) {
355 ; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert3:
357 ; CHECK-NEXT: lui a0, 8208
358 ; CHECK-NEXT: addi a0, a0, 1
359 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
360 ; CHECK-NEXT: vmv.s.x v10, a0
361 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
362 ; CHECK-NEXT: vrgather.vv v9, v8, v10
363 ; CHECK-NEXT: vmv1r.v v8, v9
365 %v2 = shufflevector <4 x i8> poison, <4 x i8> %v, <4 x i32> <i32 5, i32 4, i32 5, i32 6>
369 define <2 x i8> @vslide1up_4xi8_neg_length_changing(<4 x i8> %v, i8 %b) {
370 ; CHECK-LABEL: vslide1up_4xi8_neg_length_changing:
372 ; CHECK-NEXT: vmv1r.v v9, v8
373 ; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
374 ; CHECK-NEXT: vmv.s.x v9, a0
375 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
376 ; CHECK-NEXT: vslideup.vi v9, v8, 1
377 ; CHECK-NEXT: vmv1r.v v8, v9
379 %v1 = insertelement <4 x i8> %v, i8 %b, i64 0
380 %v2 = shufflevector <4 x i8> %v1, <4 x i8> %v, <2 x i32> <i32 0, i32 4>