1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zfh,+zvfh < %s | FileCheck %s
3 ; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zfh,+zvfh < %s | FileCheck %s
5 ; Tests assume VLEN=128 or vscale_range_min=2.
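;
; The checks below exercise the llvm.vector.splice lowering: the first operand
; is slid down to the splice point with vslidedown and the second operand is
; slid up into the vacated tail with vslideup. For a negative offset -N the
; splice point is VLMAX-N (derived from vlenb, which holds VLEN/8); for a
; positive offset N the slidedown runs with VL = VLMAX-N and the slideup
; starts at element VLMAX-N. The offset_min/offset_max tests use the extreme
; offsets that are legal under the minimum vector length assumed above.
; Mask (i1) operands cannot be slid directly, so they are first widened to i8
; vectors with vmerge.vim, spliced, and narrowed back to a mask with
; vand.vi + vmsne.vi.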
7 declare <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32)
9 define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
10 ; CHECK-LABEL: splice_nxv1i1_offset_negone:
12 ; CHECK-NEXT: vmv1r.v v9, v0
13 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
14 ; CHECK-NEXT: vmv.v.i v10, 0
15 ; CHECK-NEXT: vmv1r.v v0, v8
16 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
17 ; CHECK-NEXT: vmv1r.v v0, v9
18 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
19 ; CHECK-NEXT: csrr a0, vlenb
20 ; CHECK-NEXT: srli a0, a0, 3
21 ; CHECK-NEXT: addi a0, a0, -1
22 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
23 ; CHECK-NEXT: vslideup.vi v9, v8, 1
24 ; CHECK-NEXT: vand.vi v8, v9, 1
25 ; CHECK-NEXT: vmsne.vi v0, v8, 0
27 %res = call <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 -1)
28 ret <vscale x 1 x i1> %res
31 define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
32 ; CHECK-LABEL: splice_nxv1i1_offset_max:
34 ; CHECK-NEXT: vmv1r.v v9, v0
35 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
36 ; CHECK-NEXT: vmv.v.i v10, 0
37 ; CHECK-NEXT: vmv1r.v v0, v8
38 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
39 ; CHECK-NEXT: vmv1r.v v0, v9
40 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
41 ; CHECK-NEXT: csrr a0, vlenb
42 ; CHECK-NEXT: srli a0, a0, 3
43 ; CHECK-NEXT: addi a0, a0, -1
44 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
45 ; CHECK-NEXT: vslidedown.vi v9, v9, 1
46 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
47 ; CHECK-NEXT: vslideup.vx v9, v8, a0
48 ; CHECK-NEXT: vand.vi v8, v9, 1
49 ; CHECK-NEXT: vmsne.vi v0, v8, 0
51 %res = call <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 1)
52 ret <vscale x 1 x i1> %res
55 declare <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
57 define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
58 ; CHECK-LABEL: splice_nxv2i1_offset_negone:
60 ; CHECK-NEXT: vmv1r.v v9, v0
61 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
62 ; CHECK-NEXT: vmv.v.i v10, 0
63 ; CHECK-NEXT: vmv1r.v v0, v8
64 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
65 ; CHECK-NEXT: vmv1r.v v0, v9
66 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
67 ; CHECK-NEXT: csrr a0, vlenb
68 ; CHECK-NEXT: srli a0, a0, 2
69 ; CHECK-NEXT: addi a0, a0, -1
70 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
71 ; CHECK-NEXT: vslideup.vi v9, v8, 1
72 ; CHECK-NEXT: vand.vi v8, v9, 1
73 ; CHECK-NEXT: vmsne.vi v0, v8, 0
75 %res = call <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 -1)
76 ret <vscale x 2 x i1> %res
79 define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
80 ; CHECK-LABEL: splice_nxv2i1_offset_max:
82 ; CHECK-NEXT: vmv1r.v v9, v0
83 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
84 ; CHECK-NEXT: vmv.v.i v10, 0
85 ; CHECK-NEXT: vmv1r.v v0, v8
86 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
87 ; CHECK-NEXT: vmv1r.v v0, v9
88 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
89 ; CHECK-NEXT: csrr a0, vlenb
90 ; CHECK-NEXT: srli a0, a0, 2
91 ; CHECK-NEXT: addi a0, a0, -3
92 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
93 ; CHECK-NEXT: vslidedown.vi v9, v9, 3
94 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
95 ; CHECK-NEXT: vslideup.vx v9, v8, a0
96 ; CHECK-NEXT: vand.vi v8, v9, 1
97 ; CHECK-NEXT: vmsne.vi v0, v8, 0
99 %res = call <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 3)
100 ret <vscale x 2 x i1> %res
103 declare <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32)
105 define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
106 ; CHECK-LABEL: splice_nxv4i1_offset_negone:
108 ; CHECK-NEXT: vmv1r.v v9, v0
109 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
110 ; CHECK-NEXT: vmv.v.i v10, 0
111 ; CHECK-NEXT: vmv1r.v v0, v8
112 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
113 ; CHECK-NEXT: vmv1r.v v0, v9
114 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
115 ; CHECK-NEXT: csrr a0, vlenb
116 ; CHECK-NEXT: srli a0, a0, 1
117 ; CHECK-NEXT: addi a0, a0, -1
118 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
119 ; CHECK-NEXT: vslideup.vi v9, v8, 1
120 ; CHECK-NEXT: vand.vi v8, v9, 1
121 ; CHECK-NEXT: vmsne.vi v0, v8, 0
123 %res = call <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 -1)
124 ret <vscale x 4 x i1> %res
127 define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
128 ; CHECK-LABEL: splice_nxv4i1_offset_max:
130 ; CHECK-NEXT: vmv1r.v v9, v0
131 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
132 ; CHECK-NEXT: vmv.v.i v10, 0
133 ; CHECK-NEXT: vmv1r.v v0, v8
134 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
135 ; CHECK-NEXT: vmv1r.v v0, v9
136 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
137 ; CHECK-NEXT: csrr a0, vlenb
138 ; CHECK-NEXT: srli a0, a0, 1
139 ; CHECK-NEXT: addi a0, a0, -7
140 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
141 ; CHECK-NEXT: vslidedown.vi v9, v9, 7
142 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
143 ; CHECK-NEXT: vslideup.vx v9, v8, a0
144 ; CHECK-NEXT: vand.vi v8, v9, 1
145 ; CHECK-NEXT: vmsne.vi v0, v8, 0
147 %res = call <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 7)
148 ret <vscale x 4 x i1> %res
151 declare <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)
153 define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
154 ; CHECK-LABEL: splice_nxv8i1_offset_negone:
156 ; CHECK-NEXT: vmv1r.v v9, v0
157 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
158 ; CHECK-NEXT: vmv.v.i v10, 0
159 ; CHECK-NEXT: vmv1r.v v0, v8
160 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
161 ; CHECK-NEXT: vmv1r.v v0, v9
162 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
163 ; CHECK-NEXT: csrr a0, vlenb
164 ; CHECK-NEXT: addi a0, a0, -1
165 ; CHECK-NEXT: vslidedown.vx v9, v9, a0
166 ; CHECK-NEXT: vslideup.vi v9, v8, 1
167 ; CHECK-NEXT: vand.vi v8, v9, 1
168 ; CHECK-NEXT: vmsne.vi v0, v8, 0
170 %res = call <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 -1)
171 ret <vscale x 8 x i1> %res
174 define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
175 ; CHECK-LABEL: splice_nxv8i1_offset_max:
177 ; CHECK-NEXT: vmv1r.v v9, v0
178 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
179 ; CHECK-NEXT: vmv.v.i v10, 0
180 ; CHECK-NEXT: vmv1r.v v0, v8
181 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
182 ; CHECK-NEXT: vmv1r.v v0, v9
183 ; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
184 ; CHECK-NEXT: csrr a0, vlenb
185 ; CHECK-NEXT: addi a0, a0, -15
186 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
187 ; CHECK-NEXT: vslidedown.vi v9, v9, 15
188 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
189 ; CHECK-NEXT: vslideup.vx v9, v8, a0
190 ; CHECK-NEXT: vand.vi v8, v9, 1
191 ; CHECK-NEXT: vmsne.vi v0, v8, 0
193 %res = call <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 15)
194 ret <vscale x 8 x i1> %res
197 declare <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
199 define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
200 ; CHECK-LABEL: splice_nxv16i1_offset_negone:
202 ; CHECK-NEXT: vmv1r.v v9, v0
203 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
204 ; CHECK-NEXT: vmv.v.i v10, 0
205 ; CHECK-NEXT: vmv1r.v v0, v8
206 ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
207 ; CHECK-NEXT: vmv1r.v v0, v9
208 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
209 ; CHECK-NEXT: csrr a0, vlenb
210 ; CHECK-NEXT: slli a0, a0, 1
211 ; CHECK-NEXT: addi a0, a0, -1
212 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
213 ; CHECK-NEXT: vslideup.vi v8, v12, 1
214 ; CHECK-NEXT: vand.vi v8, v8, 1
215 ; CHECK-NEXT: vmsne.vi v0, v8, 0
217 %res = call <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 -1)
218 ret <vscale x 16 x i1> %res
221 define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
222 ; CHECK-LABEL: splice_nxv16i1_offset_max:
224 ; CHECK-NEXT: vmv1r.v v9, v0
225 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
226 ; CHECK-NEXT: vmv.v.i v10, 0
227 ; CHECK-NEXT: vmv1r.v v0, v8
228 ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
229 ; CHECK-NEXT: vmv1r.v v0, v9
230 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
231 ; CHECK-NEXT: csrr a0, vlenb
232 ; CHECK-NEXT: slli a0, a0, 1
233 ; CHECK-NEXT: addi a0, a0, -31
234 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
235 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
236 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
237 ; CHECK-NEXT: vslideup.vx v8, v12, a0
238 ; CHECK-NEXT: vand.vi v8, v8, 1
239 ; CHECK-NEXT: vmsne.vi v0, v8, 0
241 %res = call <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 31)
242 ret <vscale x 16 x i1> %res
245 declare <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32)
247 define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
248 ; CHECK-LABEL: splice_nxv32i1_offset_negone:
250 ; CHECK-NEXT: vmv1r.v v9, v0
251 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
252 ; CHECK-NEXT: vmv.v.i v12, 0
253 ; CHECK-NEXT: vmv1r.v v0, v8
254 ; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
255 ; CHECK-NEXT: vmv1r.v v0, v9
256 ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
257 ; CHECK-NEXT: csrr a0, vlenb
258 ; CHECK-NEXT: slli a0, a0, 2
259 ; CHECK-NEXT: addi a0, a0, -1
260 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
261 ; CHECK-NEXT: vslideup.vi v8, v16, 1
262 ; CHECK-NEXT: vand.vi v8, v8, 1
263 ; CHECK-NEXT: vmsne.vi v0, v8, 0
265 %res = call <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 -1)
266 ret <vscale x 32 x i1> %res
269 define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
270 ; CHECK-LABEL: splice_nxv32i1_offset_max:
272 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
273 ; CHECK-NEXT: vmv.v.i v12, 0
274 ; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
275 ; CHECK-NEXT: csrr a0, vlenb
276 ; CHECK-NEXT: slli a0, a0, 2
277 ; CHECK-NEXT: addi a0, a0, -63
278 ; CHECK-NEXT: li a1, 63
279 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
280 ; CHECK-NEXT: vslidedown.vx v16, v16, a1
281 ; CHECK-NEXT: vmv1r.v v0, v8
282 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
283 ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
284 ; CHECK-NEXT: vslideup.vx v16, v8, a0
285 ; CHECK-NEXT: vand.vi v8, v16, 1
286 ; CHECK-NEXT: vmsne.vi v0, v8, 0
288 %res = call <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 63)
289 ret <vscale x 32 x i1> %res
292 declare <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32)
294 define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
295 ; CHECK-LABEL: splice_nxv64i1_offset_negone:
297 ; CHECK-NEXT: vmv1r.v v9, v0
298 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
299 ; CHECK-NEXT: vmv.v.i v24, 0
300 ; CHECK-NEXT: vmv1r.v v0, v8
301 ; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
302 ; CHECK-NEXT: vmv1r.v v0, v9
303 ; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
304 ; CHECK-NEXT: csrr a0, vlenb
305 ; CHECK-NEXT: slli a0, a0, 3
306 ; CHECK-NEXT: addi a0, a0, -1
307 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
308 ; CHECK-NEXT: vslideup.vi v8, v16, 1
309 ; CHECK-NEXT: vand.vi v8, v8, 1
310 ; CHECK-NEXT: vmsne.vi v0, v8, 0
312 %res = call <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 -1)
313 ret <vscale x 64 x i1> %res
316 define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
317 ; CHECK-LABEL: splice_nxv64i1_offset_max:
319 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
320 ; CHECK-NEXT: vmv.v.i v16, 0
321 ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
322 ; CHECK-NEXT: csrr a0, vlenb
323 ; CHECK-NEXT: slli a0, a0, 3
324 ; CHECK-NEXT: addi a0, a0, -127
325 ; CHECK-NEXT: li a1, 127
326 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
327 ; CHECK-NEXT: vslidedown.vx v24, v24, a1
328 ; CHECK-NEXT: vmv1r.v v0, v8
329 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
330 ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
331 ; CHECK-NEXT: vslideup.vx v24, v8, a0
332 ; CHECK-NEXT: vand.vi v8, v24, 1
333 ; CHECK-NEXT: vmsne.vi v0, v8, 0
335 %res = call <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 127)
336 ret <vscale x 64 x i1> %res
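;
; For non-mask element types the splice is a single vslidedown/vslideup pair
; on the source registers directly; no widening or mask reconstruction is
; needed, and an offset of zero simply returns the first operand.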
339 declare <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32)
341 define <vscale x 1 x i8> @splice_nxv1i8_offset_zero(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
342 ; CHECK-LABEL: splice_nxv1i8_offset_zero:
345 %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 0)
346 ret <vscale x 1 x i8> %res
349 define <vscale x 1 x i8> @splice_nxv1i8_offset_negone(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
350 ; CHECK-LABEL: splice_nxv1i8_offset_negone:
352 ; CHECK-NEXT: csrr a0, vlenb
353 ; CHECK-NEXT: srli a0, a0, 3
354 ; CHECK-NEXT: addi a0, a0, -1
355 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
356 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
357 ; CHECK-NEXT: vslideup.vi v8, v9, 1
359 %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
360 ret <vscale x 1 x i8> %res
363 define <vscale x 1 x i8> @splice_nxv1i8_offset_min(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
364 ; CHECK-LABEL: splice_nxv1i8_offset_min:
366 ; CHECK-NEXT: csrr a0, vlenb
367 ; CHECK-NEXT: srli a0, a0, 3
368 ; CHECK-NEXT: addi a0, a0, -2
369 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
370 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
371 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
372 ; CHECK-NEXT: vslideup.vi v8, v9, 2
374 %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -2)
375 ret <vscale x 1 x i8> %res
378 define <vscale x 1 x i8> @splice_nxv1i8_offset_max(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
379 ; CHECK-LABEL: splice_nxv1i8_offset_max:
381 ; CHECK-NEXT: csrr a0, vlenb
382 ; CHECK-NEXT: srli a0, a0, 3
383 ; CHECK-NEXT: addi a0, a0, -1
384 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
385 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
386 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
387 ; CHECK-NEXT: vslideup.vx v8, v9, a0
389 %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 1)
390 ret <vscale x 1 x i8> %res
393 declare <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, i32)
395 define <vscale x 2 x i8> @splice_nxv2i8_offset_zero(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
396 ; CHECK-LABEL: splice_nxv2i8_offset_zero:
399 %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 0)
400 ret <vscale x 2 x i8> %res
403 define <vscale x 2 x i8> @splice_nxv2i8_offset_negone(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
404 ; CHECK-LABEL: splice_nxv2i8_offset_negone:
406 ; CHECK-NEXT: csrr a0, vlenb
407 ; CHECK-NEXT: srli a0, a0, 2
408 ; CHECK-NEXT: addi a0, a0, -1
409 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
410 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
411 ; CHECK-NEXT: vslideup.vi v8, v9, 1
413 %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -1)
414 ret <vscale x 2 x i8> %res
417 define <vscale x 2 x i8> @splice_nxv2i8_offset_min(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
418 ; CHECK-LABEL: splice_nxv2i8_offset_min:
420 ; CHECK-NEXT: csrr a0, vlenb
421 ; CHECK-NEXT: srli a0, a0, 2
422 ; CHECK-NEXT: addi a0, a0, -4
423 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
424 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
425 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
426 ; CHECK-NEXT: vslideup.vi v8, v9, 4
428 %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -4)
429 ret <vscale x 2 x i8> %res
432 define <vscale x 2 x i8> @splice_nxv2i8_offset_max(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
433 ; CHECK-LABEL: splice_nxv2i8_offset_max:
435 ; CHECK-NEXT: csrr a0, vlenb
436 ; CHECK-NEXT: srli a0, a0, 2
437 ; CHECK-NEXT: addi a0, a0, -3
438 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
439 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
440 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
441 ; CHECK-NEXT: vslideup.vx v8, v9, a0
443 %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 3)
444 ret <vscale x 2 x i8> %res
447 declare <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, i32)
449 define <vscale x 4 x i8> @splice_nxv4i8_offset_zero(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
450 ; CHECK-LABEL: splice_nxv4i8_offset_zero:
453 %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 0)
454 ret <vscale x 4 x i8> %res
457 define <vscale x 4 x i8> @splice_nxv4i8_offset_negone(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
458 ; CHECK-LABEL: splice_nxv4i8_offset_negone:
460 ; CHECK-NEXT: csrr a0, vlenb
461 ; CHECK-NEXT: srli a0, a0, 1
462 ; CHECK-NEXT: addi a0, a0, -1
463 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
464 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
465 ; CHECK-NEXT: vslideup.vi v8, v9, 1
467 %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -1)
468 ret <vscale x 4 x i8> %res
471 define <vscale x 4 x i8> @splice_nxv4i8_offset_min(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
472 ; CHECK-LABEL: splice_nxv4i8_offset_min:
474 ; CHECK-NEXT: csrr a0, vlenb
475 ; CHECK-NEXT: srli a0, a0, 1
476 ; CHECK-NEXT: addi a0, a0, -8
477 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
478 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
479 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
480 ; CHECK-NEXT: vslideup.vi v8, v9, 8
482 %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -8)
483 ret <vscale x 4 x i8> %res
486 define <vscale x 4 x i8> @splice_nxv4i8_offset_max(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
487 ; CHECK-LABEL: splice_nxv4i8_offset_max:
489 ; CHECK-NEXT: csrr a0, vlenb
490 ; CHECK-NEXT: srli a0, a0, 1
491 ; CHECK-NEXT: addi a0, a0, -7
492 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
493 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
494 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
495 ; CHECK-NEXT: vslideup.vx v8, v9, a0
497 %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 7)
498 ret <vscale x 4 x i8> %res
501 declare <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i32)
503 define <vscale x 8 x i8> @splice_nxv8i8_offset_zero(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
504 ; CHECK-LABEL: splice_nxv8i8_offset_zero:
507 %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 0)
508 ret <vscale x 8 x i8> %res
511 define <vscale x 8 x i8> @splice_nxv8i8_offset_negone(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
512 ; CHECK-LABEL: splice_nxv8i8_offset_negone:
514 ; CHECK-NEXT: csrr a0, vlenb
515 ; CHECK-NEXT: addi a0, a0, -1
516 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
517 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
518 ; CHECK-NEXT: vslideup.vi v8, v9, 1
520 %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -1)
521 ret <vscale x 8 x i8> %res
524 define <vscale x 8 x i8> @splice_nxv8i8_offset_min(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
525 ; CHECK-LABEL: splice_nxv8i8_offset_min:
527 ; CHECK-NEXT: csrr a0, vlenb
528 ; CHECK-NEXT: addi a0, a0, -16
529 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
530 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
531 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
532 ; CHECK-NEXT: vslideup.vi v8, v9, 16
534 %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -16)
535 ret <vscale x 8 x i8> %res
538 define <vscale x 8 x i8> @splice_nxv8i8_offset_max(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
539 ; CHECK-LABEL: splice_nxv8i8_offset_max:
541 ; CHECK-NEXT: csrr a0, vlenb
542 ; CHECK-NEXT: addi a0, a0, -15
543 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
544 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
545 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
546 ; CHECK-NEXT: vslideup.vx v8, v9, a0
548 %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 15)
549 ret <vscale x 8 x i8> %res
552 declare <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i32)
554 define <vscale x 16 x i8> @splice_nxv16i8_offset_zero(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
555 ; CHECK-LABEL: splice_nxv16i8_offset_zero:
558 %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
559 ret <vscale x 16 x i8> %res
562 define <vscale x 16 x i8> @splice_nxv16i8_offset_negone(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
563 ; CHECK-LABEL: splice_nxv16i8_offset_negone:
565 ; CHECK-NEXT: csrr a0, vlenb
566 ; CHECK-NEXT: slli a0, a0, 1
567 ; CHECK-NEXT: addi a0, a0, -1
568 ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
569 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
570 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
571 ; CHECK-NEXT: vslideup.vi v8, v10, 1
573 %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
574 ret <vscale x 16 x i8> %res
577 define <vscale x 16 x i8> @splice_nxv16i8_offset_min(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
578 ; CHECK-LABEL: splice_nxv16i8_offset_min:
580 ; CHECK-NEXT: csrr a0, vlenb
581 ; CHECK-NEXT: slli a0, a0, 1
582 ; CHECK-NEXT: addi a0, a0, -32
583 ; CHECK-NEXT: li a1, 32
584 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
585 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
586 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
587 ; CHECK-NEXT: vslideup.vx v8, v10, a1
589 %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -32)
590 ret <vscale x 16 x i8> %res
593 define <vscale x 16 x i8> @splice_nxv16i8_offset_max(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
594 ; CHECK-LABEL: splice_nxv16i8_offset_max:
596 ; CHECK-NEXT: csrr a0, vlenb
597 ; CHECK-NEXT: slli a0, a0, 1
598 ; CHECK-NEXT: addi a0, a0, -31
599 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
600 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
601 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
602 ; CHECK-NEXT: vslideup.vx v8, v10, a0
604 %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 31)
605 ret <vscale x 16 x i8> %res
608 declare <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, i32)
610 define <vscale x 32 x i8> @splice_nxv32i8_offset_zero(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
611 ; CHECK-LABEL: splice_nxv32i8_offset_zero:
614 %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 0)
615 ret <vscale x 32 x i8> %res
618 define <vscale x 32 x i8> @splice_nxv32i8_offset_negone(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
619 ; CHECK-LABEL: splice_nxv32i8_offset_negone:
621 ; CHECK-NEXT: csrr a0, vlenb
622 ; CHECK-NEXT: slli a0, a0, 2
623 ; CHECK-NEXT: addi a0, a0, -1
624 ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
625 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
626 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
627 ; CHECK-NEXT: vslideup.vi v8, v12, 1
629 %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -1)
630 ret <vscale x 32 x i8> %res
633 define <vscale x 32 x i8> @splice_nxv32i8_offset_min(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
634 ; CHECK-LABEL: splice_nxv32i8_offset_min:
636 ; CHECK-NEXT: csrr a0, vlenb
637 ; CHECK-NEXT: slli a0, a0, 2
638 ; CHECK-NEXT: addi a0, a0, -64
639 ; CHECK-NEXT: li a1, 64
640 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
641 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
642 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
643 ; CHECK-NEXT: vslideup.vx v8, v12, a1
645 %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -64)
646 ret <vscale x 32 x i8> %res
649 define <vscale x 32 x i8> @splice_nxv32i8_offset_max(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
650 ; CHECK-LABEL: splice_nxv32i8_offset_max:
652 ; CHECK-NEXT: csrr a0, vlenb
653 ; CHECK-NEXT: slli a0, a0, 2
654 ; CHECK-NEXT: addi a0, a0, -63
655 ; CHECK-NEXT: li a1, 63
656 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
657 ; CHECK-NEXT: vslidedown.vx v8, v8, a1
658 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
659 ; CHECK-NEXT: vslideup.vx v8, v12, a0
661 %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 63)
662 ret <vscale x 32 x i8> %res
665 declare <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, i32)
667 define <vscale x 64 x i8> @splice_nxv64i8_offset_zero(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
668 ; CHECK-LABEL: splice_nxv64i8_offset_zero:
671 %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 0)
672 ret <vscale x 64 x i8> %res
675 define <vscale x 64 x i8> @splice_nxv64i8_offset_negone(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
676 ; CHECK-LABEL: splice_nxv64i8_offset_negone:
678 ; CHECK-NEXT: csrr a0, vlenb
679 ; CHECK-NEXT: slli a0, a0, 3
680 ; CHECK-NEXT: addi a0, a0, -1
681 ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
682 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
683 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
684 ; CHECK-NEXT: vslideup.vi v8, v16, 1
686 %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -1)
687 ret <vscale x 64 x i8> %res
690 define <vscale x 64 x i8> @splice_nxv64i8_offset_min(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
691 ; CHECK-LABEL: splice_nxv64i8_offset_min:
693 ; CHECK-NEXT: csrr a0, vlenb
694 ; CHECK-NEXT: slli a0, a0, 3
695 ; CHECK-NEXT: addi a0, a0, -128
696 ; CHECK-NEXT: li a1, 128
697 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
698 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
699 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
700 ; CHECK-NEXT: vslideup.vx v8, v16, a1
702 %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -128)
703 ret <vscale x 64 x i8> %res
706 define <vscale x 64 x i8> @splice_nxv64i8_offset_max(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
707 ; CHECK-LABEL: splice_nxv64i8_offset_max:
709 ; CHECK-NEXT: csrr a0, vlenb
710 ; CHECK-NEXT: slli a0, a0, 3
711 ; CHECK-NEXT: addi a0, a0, -127
712 ; CHECK-NEXT: li a1, 127
713 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
714 ; CHECK-NEXT: vslidedown.vx v8, v8, a1
715 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
716 ; CHECK-NEXT: vslideup.vx v8, v16, a0
718 %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 127)
719 ret <vscale x 64 x i8> %res
722 declare <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, i32)
724 define <vscale x 1 x i16> @splice_nxv1i16_offset_zero(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
725 ; CHECK-LABEL: splice_nxv1i16_offset_zero:
728 %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 0)
729 ret <vscale x 1 x i16> %res
732 define <vscale x 1 x i16> @splice_nxv1i16_offset_negone(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
733 ; CHECK-LABEL: splice_nxv1i16_offset_negone:
735 ; CHECK-NEXT: csrr a0, vlenb
736 ; CHECK-NEXT: srli a0, a0, 3
737 ; CHECK-NEXT: addi a0, a0, -1
738 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
739 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
740 ; CHECK-NEXT: vslideup.vi v8, v9, 1
742 %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -1)
743 ret <vscale x 1 x i16> %res
746 define <vscale x 1 x i16> @splice_nxv1i16_offset_min(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
747 ; CHECK-LABEL: splice_nxv1i16_offset_min:
749 ; CHECK-NEXT: csrr a0, vlenb
750 ; CHECK-NEXT: srli a0, a0, 3
751 ; CHECK-NEXT: addi a0, a0, -2
752 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
753 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
754 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
755 ; CHECK-NEXT: vslideup.vi v8, v9, 2
757 %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -2)
758 ret <vscale x 1 x i16> %res
761 define <vscale x 1 x i16> @splice_nxv1i16_offset_max(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
762 ; CHECK-LABEL: splice_nxv1i16_offset_max:
764 ; CHECK-NEXT: csrr a0, vlenb
765 ; CHECK-NEXT: srli a0, a0, 3
766 ; CHECK-NEXT: addi a0, a0, -1
767 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
768 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
769 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
770 ; CHECK-NEXT: vslideup.vx v8, v9, a0
772 %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 1)
773 ret <vscale x 1 x i16> %res
776 declare <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, i32)
778 define <vscale x 2 x i16> @splice_nxv2i16_offset_zero(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
779 ; CHECK-LABEL: splice_nxv2i16_offset_zero:
782 %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 0)
783 ret <vscale x 2 x i16> %res
786 define <vscale x 2 x i16> @splice_nxv2i16_offset_negone(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
787 ; CHECK-LABEL: splice_nxv2i16_offset_negone:
789 ; CHECK-NEXT: csrr a0, vlenb
790 ; CHECK-NEXT: srli a0, a0, 2
791 ; CHECK-NEXT: addi a0, a0, -1
792 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
793 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
794 ; CHECK-NEXT: vslideup.vi v8, v9, 1
796 %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -1)
797 ret <vscale x 2 x i16> %res
800 define <vscale x 2 x i16> @splice_nxv2i16_offset_min(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
801 ; CHECK-LABEL: splice_nxv2i16_offset_min:
803 ; CHECK-NEXT: csrr a0, vlenb
804 ; CHECK-NEXT: srli a0, a0, 2
805 ; CHECK-NEXT: addi a0, a0, -4
806 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
807 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
808 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
809 ; CHECK-NEXT: vslideup.vi v8, v9, 4
811 %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -4)
812 ret <vscale x 2 x i16> %res
815 define <vscale x 2 x i16> @splice_nxv2i16_offset_max(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
816 ; CHECK-LABEL: splice_nxv2i16_offset_max:
818 ; CHECK-NEXT: csrr a0, vlenb
819 ; CHECK-NEXT: srli a0, a0, 2
820 ; CHECK-NEXT: addi a0, a0, -3
821 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
822 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
823 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
824 ; CHECK-NEXT: vslideup.vx v8, v9, a0
826 %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 3)
827 ret <vscale x 2 x i16> %res
830 declare <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32)
832 define <vscale x 4 x i16> @splice_nxv4i16_offset_zero(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
833 ; CHECK-LABEL: splice_nxv4i16_offset_zero:
836 %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 0)
837 ret <vscale x 4 x i16> %res
840 define <vscale x 4 x i16> @splice_nxv4i16_offset_negone(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
841 ; CHECK-LABEL: splice_nxv4i16_offset_negone:
843 ; CHECK-NEXT: csrr a0, vlenb
844 ; CHECK-NEXT: srli a0, a0, 1
845 ; CHECK-NEXT: addi a0, a0, -1
846 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
847 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
848 ; CHECK-NEXT: vslideup.vi v8, v9, 1
850 %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -1)
851 ret <vscale x 4 x i16> %res
854 define <vscale x 4 x i16> @splice_nxv4i16_offset_min(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
855 ; CHECK-LABEL: splice_nxv4i16_offset_min:
857 ; CHECK-NEXT: csrr a0, vlenb
858 ; CHECK-NEXT: srli a0, a0, 1
859 ; CHECK-NEXT: addi a0, a0, -8
860 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
861 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
862 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
863 ; CHECK-NEXT: vslideup.vi v8, v9, 8
865 %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -8)
866 ret <vscale x 4 x i16> %res
869 define <vscale x 4 x i16> @splice_nxv4i16_offset_max(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
870 ; CHECK-LABEL: splice_nxv4i16_offset_max:
872 ; CHECK-NEXT: csrr a0, vlenb
873 ; CHECK-NEXT: srli a0, a0, 1
874 ; CHECK-NEXT: addi a0, a0, -7
875 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
876 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
877 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
878 ; CHECK-NEXT: vslideup.vx v8, v9, a0
880 %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 7)
881 ret <vscale x 4 x i16> %res
884 declare <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
886 define <vscale x 8 x i16> @splice_nxv8i16_offset_zero(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
887 ; CHECK-LABEL: splice_nxv8i16_offset_zero:
890 %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 0)
891 ret <vscale x 8 x i16> %res
894 define <vscale x 8 x i16> @splice_nxv8i16_offset_negone(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
895 ; CHECK-LABEL: splice_nxv8i16_offset_negone:
897 ; CHECK-NEXT: csrr a0, vlenb
898 ; CHECK-NEXT: addi a0, a0, -1
899 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
900 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
901 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
902 ; CHECK-NEXT: vslideup.vi v8, v10, 1
904 %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
905 ret <vscale x 8 x i16> %res
908 define <vscale x 8 x i16> @splice_nxv8i16_offset_min(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
909 ; CHECK-LABEL: splice_nxv8i16_offset_min:
911 ; CHECK-NEXT: csrr a0, vlenb
912 ; CHECK-NEXT: addi a0, a0, -16
913 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
914 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
915 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
916 ; CHECK-NEXT: vslideup.vi v8, v10, 16
918 %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -16)
919 ret <vscale x 8 x i16> %res
922 define <vscale x 8 x i16> @splice_nxv8i16_offset_max(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
923 ; CHECK-LABEL: splice_nxv8i16_offset_max:
925 ; CHECK-NEXT: csrr a0, vlenb
926 ; CHECK-NEXT: addi a0, a0, -15
927 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
928 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
929 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
930 ; CHECK-NEXT: vslideup.vx v8, v10, a0
932 %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 15)
933 ret <vscale x 8 x i16> %res
936 declare <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, i32)
938 define <vscale x 16 x i16> @splice_nxv16i16_offset_zero(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
939 ; CHECK-LABEL: splice_nxv16i16_offset_zero:
942 %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 0)
943 ret <vscale x 16 x i16> %res
946 define <vscale x 16 x i16> @splice_nxv16i16_offset_negone(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
947 ; CHECK-LABEL: splice_nxv16i16_offset_negone:
949 ; CHECK-NEXT: csrr a0, vlenb
950 ; CHECK-NEXT: slli a0, a0, 1
951 ; CHECK-NEXT: addi a0, a0, -1
952 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
953 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
954 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
955 ; CHECK-NEXT: vslideup.vi v8, v12, 1
957 %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -1)
958 ret <vscale x 16 x i16> %res
961 define <vscale x 16 x i16> @splice_nxv16i16_offset_min(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
962 ; CHECK-LABEL: splice_nxv16i16_offset_min:
964 ; CHECK-NEXT: csrr a0, vlenb
965 ; CHECK-NEXT: slli a0, a0, 1
966 ; CHECK-NEXT: addi a0, a0, -32
967 ; CHECK-NEXT: li a1, 32
968 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
969 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
970 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
971 ; CHECK-NEXT: vslideup.vx v8, v12, a1
973 %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -32)
974 ret <vscale x 16 x i16> %res
977 define <vscale x 16 x i16> @splice_nxv16i16_offset_max(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
978 ; CHECK-LABEL: splice_nxv16i16_offset_max:
980 ; CHECK-NEXT: csrr a0, vlenb
981 ; CHECK-NEXT: slli a0, a0, 1
982 ; CHECK-NEXT: addi a0, a0, -31
983 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
984 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
985 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
986 ; CHECK-NEXT: vslideup.vx v8, v12, a0
988 %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 31)
989 ret <vscale x 16 x i16> %res
992 declare <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, i32)
994 define <vscale x 32 x i16> @splice_nxv32i16_offset_zero(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
995 ; CHECK-LABEL: splice_nxv32i16_offset_zero:
998 %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 0)
999 ret <vscale x 32 x i16> %res
1002 define <vscale x 32 x i16> @splice_nxv32i16_offset_negone(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
1003 ; CHECK-LABEL: splice_nxv32i16_offset_negone:
1005 ; CHECK-NEXT: csrr a0, vlenb
1006 ; CHECK-NEXT: slli a0, a0, 2
1007 ; CHECK-NEXT: addi a0, a0, -1
1008 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
1009 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1010 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1011 ; CHECK-NEXT: vslideup.vi v8, v16, 1
1013 %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -1)
1014 ret <vscale x 32 x i16> %res
1017 define <vscale x 32 x i16> @splice_nxv32i16_offset_min(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
1018 ; CHECK-LABEL: splice_nxv32i16_offset_min:
1020 ; CHECK-NEXT: csrr a0, vlenb
1021 ; CHECK-NEXT: slli a0, a0, 2
1022 ; CHECK-NEXT: addi a0, a0, -64
1023 ; CHECK-NEXT: li a1, 64
1024 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1025 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1026 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1027 ; CHECK-NEXT: vslideup.vx v8, v16, a1
1029 %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -64)
1030 ret <vscale x 32 x i16> %res
1033 define <vscale x 32 x i16> @splice_nxv32i16_offset_max(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
1034 ; CHECK-LABEL: splice_nxv32i16_offset_max:
1036 ; CHECK-NEXT: csrr a0, vlenb
1037 ; CHECK-NEXT: slli a0, a0, 2
1038 ; CHECK-NEXT: addi a0, a0, -63
1039 ; CHECK-NEXT: li a1, 63
1040 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1041 ; CHECK-NEXT: vslidedown.vx v8, v8, a1
1042 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1043 ; CHECK-NEXT: vslideup.vx v8, v16, a0
1045 %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 63)
1046 ret <vscale x 32 x i16> %res
1049 declare <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32)
1051 define <vscale x 1 x i32> @splice_nxv1i32_offset_zero(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
1052 ; CHECK-LABEL: splice_nxv1i32_offset_zero:
1055 %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 0)
1056 ret <vscale x 1 x i32> %res
1059 define <vscale x 1 x i32> @splice_nxv1i32_offset_negone(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
1060 ; CHECK-LABEL: splice_nxv1i32_offset_negone:
1062 ; CHECK-NEXT: csrr a0, vlenb
1063 ; CHECK-NEXT: srli a0, a0, 3
1064 ; CHECK-NEXT: addi a0, a0, -1
1065 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1066 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1067 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1069 %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -1)
1070 ret <vscale x 1 x i32> %res
1073 define <vscale x 1 x i32> @splice_nxv1i32_offset_min(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
1074 ; CHECK-LABEL: splice_nxv1i32_offset_min:
1076 ; CHECK-NEXT: csrr a0, vlenb
1077 ; CHECK-NEXT: srli a0, a0, 3
1078 ; CHECK-NEXT: addi a0, a0, -2
1079 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1080 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1081 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1082 ; CHECK-NEXT: vslideup.vi v8, v9, 2
1084 %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -2)
1085 ret <vscale x 1 x i32> %res
1088 define <vscale x 1 x i32> @splice_nxv1i32_offset_max(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
1089 ; CHECK-LABEL: splice_nxv1i32_offset_max:
1091 ; CHECK-NEXT: csrr a0, vlenb
1092 ; CHECK-NEXT: srli a0, a0, 3
1093 ; CHECK-NEXT: addi a0, a0, -1
1094 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1095 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1096 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1097 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1099 %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 1)
1100 ret <vscale x 1 x i32> %res
1103 declare <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32)
1105 define <vscale x 2 x i32> @splice_nxv2i32_offset_zero(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
1106 ; CHECK-LABEL: splice_nxv2i32_offset_zero:
1109 %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 0)
1110 ret <vscale x 2 x i32> %res
1113 define <vscale x 2 x i32> @splice_nxv2i32_offset_negone(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
1114 ; CHECK-LABEL: splice_nxv2i32_offset_negone:
1116 ; CHECK-NEXT: csrr a0, vlenb
1117 ; CHECK-NEXT: srli a0, a0, 2
1118 ; CHECK-NEXT: addi a0, a0, -1
1119 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1120 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1121 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1123 %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -1)
1124 ret <vscale x 2 x i32> %res
1127 define <vscale x 2 x i32> @splice_nxv2i32_offset_min(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
1128 ; CHECK-LABEL: splice_nxv2i32_offset_min:
1130 ; CHECK-NEXT: csrr a0, vlenb
1131 ; CHECK-NEXT: srli a0, a0, 2
1132 ; CHECK-NEXT: addi a0, a0, -4
1133 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1134 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1135 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1136 ; CHECK-NEXT: vslideup.vi v8, v9, 4
1138 %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -4)
1139 ret <vscale x 2 x i32> %res
1142 define <vscale x 2 x i32> @splice_nxv2i32_offset_max(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
1143 ; CHECK-LABEL: splice_nxv2i32_offset_max:
1145 ; CHECK-NEXT: csrr a0, vlenb
1146 ; CHECK-NEXT: srli a0, a0, 2
1147 ; CHECK-NEXT: addi a0, a0, -3
1148 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1149 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
1150 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1151 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1153 %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 3)
1154 ret <vscale x 2 x i32> %res
1157 declare <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
1159 define <vscale x 4 x i32> @splice_nxv4i32_offset_zero(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
1160 ; CHECK-LABEL: splice_nxv4i32_offset_zero:
1163 %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 0)
1164 ret <vscale x 4 x i32> %res
1167 define <vscale x 4 x i32> @splice_nxv4i32_offset_negone(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
1168 ; CHECK-LABEL: splice_nxv4i32_offset_negone:
1170 ; CHECK-NEXT: csrr a0, vlenb
1171 ; CHECK-NEXT: srli a0, a0, 1
1172 ; CHECK-NEXT: addi a0, a0, -1
1173 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
1174 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1175 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1176 ; CHECK-NEXT: vslideup.vi v8, v10, 1
1178 %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
1179 ret <vscale x 4 x i32> %res
1182 define <vscale x 4 x i32> @splice_nxv4i32_offset_min(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
1183 ; CHECK-LABEL: splice_nxv4i32_offset_min:
1185 ; CHECK-NEXT: csrr a0, vlenb
1186 ; CHECK-NEXT: srli a0, a0, 1
1187 ; CHECK-NEXT: addi a0, a0, -8
1188 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1189 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1190 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1191 ; CHECK-NEXT: vslideup.vi v8, v10, 8
1193 %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -8)
1194 ret <vscale x 4 x i32> %res
1197 define <vscale x 4 x i32> @splice_nxv4i32_offset_max(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
1198 ; CHECK-LABEL: splice_nxv4i32_offset_max:
1200 ; CHECK-NEXT: csrr a0, vlenb
1201 ; CHECK-NEXT: srli a0, a0, 1
1202 ; CHECK-NEXT: addi a0, a0, -7
1203 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1204 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
1205 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
1206 ; CHECK-NEXT: vslideup.vx v8, v10, a0
1208 %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 7)
1209 ret <vscale x 4 x i32> %res
1212 declare <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32)
1214 define <vscale x 8 x i32> @splice_nxv8i32_offset_zero(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
1215 ; CHECK-LABEL: splice_nxv8i32_offset_zero:
1218 %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 0)
1219 ret <vscale x 8 x i32> %res
1222 define <vscale x 8 x i32> @splice_nxv8i32_offset_negone(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
1223 ; CHECK-LABEL: splice_nxv8i32_offset_negone:
1225 ; CHECK-NEXT: csrr a0, vlenb
1226 ; CHECK-NEXT: addi a0, a0, -1
1227 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
1228 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1229 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1230 ; CHECK-NEXT: vslideup.vi v8, v12, 1
1232 %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -1)
1233 ret <vscale x 8 x i32> %res
1236 define <vscale x 8 x i32> @splice_nxv8i32_offset_min(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
1237 ; CHECK-LABEL: splice_nxv8i32_offset_min:
1239 ; CHECK-NEXT: csrr a0, vlenb
1240 ; CHECK-NEXT: addi a0, a0, -16
1241 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
1242 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1243 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1244 ; CHECK-NEXT: vslideup.vi v8, v12, 16
1246 %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -16)
1247 ret <vscale x 8 x i32> %res
1250 define <vscale x 8 x i32> @splice_nxv8i32_offset_max(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
1251 ; CHECK-LABEL: splice_nxv8i32_offset_max:
1253 ; CHECK-NEXT: csrr a0, vlenb
1254 ; CHECK-NEXT: addi a0, a0, -15
1255 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1256 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
1257 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1258 ; CHECK-NEXT: vslideup.vx v8, v12, a0
1260 %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 15)
1261 ret <vscale x 8 x i32> %res
1264 declare <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32)
1266 define <vscale x 16 x i32> @splice_nxv16i32_offset_zero(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
1267 ; CHECK-LABEL: splice_nxv16i32_offset_zero:
1270 %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 0)
1271 ret <vscale x 16 x i32> %res
1274 define <vscale x 16 x i32> @splice_nxv16i32_offset_negone(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
1275 ; CHECK-LABEL: splice_nxv16i32_offset_negone:
1277 ; CHECK-NEXT: csrr a0, vlenb
1278 ; CHECK-NEXT: slli a0, a0, 1
1279 ; CHECK-NEXT: addi a0, a0, -1
1280 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
1281 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1282 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1283 ; CHECK-NEXT: vslideup.vi v8, v16, 1
1285 %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -1)
1286 ret <vscale x 16 x i32> %res
1289 define <vscale x 16 x i32> @splice_nxv16i32_offset_min(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
1290 ; CHECK-LABEL: splice_nxv16i32_offset_min:
1292 ; CHECK-NEXT: csrr a0, vlenb
1293 ; CHECK-NEXT: slli a0, a0, 1
1294 ; CHECK-NEXT: addi a0, a0, -32
1295 ; CHECK-NEXT: li a1, 32
1296 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1297 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1298 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
1299 ; CHECK-NEXT: vslideup.vx v8, v16, a1
1301 %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -32)
1302 ret <vscale x 16 x i32> %res
1305 define <vscale x 16 x i32> @splice_nxv16i32_offset_max(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
1306 ; CHECK-LABEL: splice_nxv16i32_offset_max:
1308 ; CHECK-NEXT: csrr a0, vlenb
1309 ; CHECK-NEXT: slli a0, a0, 1
1310 ; CHECK-NEXT: addi a0, a0, -31
1311 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1312 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
1313 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1314 ; CHECK-NEXT: vslideup.vx v8, v16, a0
1316 %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 31)
1317 ret <vscale x 16 x i32> %res
1320 declare <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32)
1322 define <vscale x 1 x i64> @splice_nxv1i64_offset_zero(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
1323 ; CHECK-LABEL: splice_nxv1i64_offset_zero:
1326 %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 0)
1327 ret <vscale x 1 x i64> %res
1330 define <vscale x 1 x i64> @splice_nxv1i64_offset_negone(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
1331 ; CHECK-LABEL: splice_nxv1i64_offset_negone:
1333 ; CHECK-NEXT: csrr a0, vlenb
1334 ; CHECK-NEXT: srli a0, a0, 3
1335 ; CHECK-NEXT: addi a0, a0, -1
1336 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1337 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1338 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1340 %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -1)
1341 ret <vscale x 1 x i64> %res
1344 define <vscale x 1 x i64> @splice_nxv1i64_offset_min(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
1345 ; CHECK-LABEL: splice_nxv1i64_offset_min:
1347 ; CHECK-NEXT: csrr a0, vlenb
1348 ; CHECK-NEXT: srli a0, a0, 3
1349 ; CHECK-NEXT: addi a0, a0, -2
1350 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1351 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1352 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
1353 ; CHECK-NEXT: vslideup.vi v8, v9, 2
1355 %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -2)
1356 ret <vscale x 1 x i64> %res
1359 define <vscale x 1 x i64> @splice_nxv1i64_offset_max(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
1360 ; CHECK-LABEL: splice_nxv1i64_offset_max:
1362 ; CHECK-NEXT: csrr a0, vlenb
1363 ; CHECK-NEXT: srli a0, a0, 3
1364 ; CHECK-NEXT: addi a0, a0, -1
1365 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1366 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1367 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1368 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1370 %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 1)
1371 ret <vscale x 1 x i64> %res
1374 declare <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
1376 define <vscale x 2 x i64> @splice_nxv2i64_offset_zero(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
1377 ; CHECK-LABEL: splice_nxv2i64_offset_zero:
1380 %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 0)
1381 ret <vscale x 2 x i64> %res
1384 define <vscale x 2 x i64> @splice_nxv2i64_offset_negone(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
1385 ; CHECK-LABEL: splice_nxv2i64_offset_negone:
1387 ; CHECK-NEXT: csrr a0, vlenb
1388 ; CHECK-NEXT: srli a0, a0, 2
1389 ; CHECK-NEXT: addi a0, a0, -1
1390 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1391 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1392 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1393 ; CHECK-NEXT: vslideup.vi v8, v10, 1
1395 %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
1396 ret <vscale x 2 x i64> %res
1399 define <vscale x 2 x i64> @splice_nxv2i64_offset_min(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
1400 ; CHECK-LABEL: splice_nxv2i64_offset_min:
1402 ; CHECK-NEXT: csrr a0, vlenb
1403 ; CHECK-NEXT: srli a0, a0, 2
1404 ; CHECK-NEXT: addi a0, a0, -4
1405 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1406 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1407 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1408 ; CHECK-NEXT: vslideup.vi v8, v10, 4
1410 %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -4)
1411 ret <vscale x 2 x i64> %res
1414 define <vscale x 2 x i64> @splice_nxv2i64_offset_max(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
1415 ; CHECK-LABEL: splice_nxv2i64_offset_max:
1417 ; CHECK-NEXT: csrr a0, vlenb
1418 ; CHECK-NEXT: srli a0, a0, 2
1419 ; CHECK-NEXT: addi a0, a0, -3
1420 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1421 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
1422 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1423 ; CHECK-NEXT: vslideup.vx v8, v10, a0
1425 %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 3)
1426 ret <vscale x 2 x i64> %res
1429 declare <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i32)
1431 define <vscale x 4 x i64> @splice_nxv4i64_offset_zero(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
1432 ; CHECK-LABEL: splice_nxv4i64_offset_zero:
1435 %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 0)
1436 ret <vscale x 4 x i64> %res
1439 define <vscale x 4 x i64> @splice_nxv4i64_offset_negone(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
1440 ; CHECK-LABEL: splice_nxv4i64_offset_negone:
1442 ; CHECK-NEXT: csrr a0, vlenb
1443 ; CHECK-NEXT: srli a0, a0, 1
1444 ; CHECK-NEXT: addi a0, a0, -1
1445 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
1446 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1447 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1448 ; CHECK-NEXT: vslideup.vi v8, v12, 1
1450 %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -1)
1451 ret <vscale x 4 x i64> %res
1454 define <vscale x 4 x i64> @splice_nxv4i64_offset_min(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
1455 ; CHECK-LABEL: splice_nxv4i64_offset_min:
1457 ; CHECK-NEXT: csrr a0, vlenb
1458 ; CHECK-NEXT: srli a0, a0, 1
1459 ; CHECK-NEXT: addi a0, a0, -8
1460 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1461 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1462 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1463 ; CHECK-NEXT: vslideup.vi v8, v12, 8
1465 %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -8)
1466 ret <vscale x 4 x i64> %res
1469 define <vscale x 4 x i64> @splice_nxv4i64_offset_max(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
1470 ; CHECK-LABEL: splice_nxv4i64_offset_max:
1472 ; CHECK-NEXT: csrr a0, vlenb
1473 ; CHECK-NEXT: srli a0, a0, 1
1474 ; CHECK-NEXT: addi a0, a0, -7
1475 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1476 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
1477 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1478 ; CHECK-NEXT: vslideup.vx v8, v12, a0
1480 %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 7)
1481 ret <vscale x 4 x i64> %res
1484 declare <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i32)
1486 define <vscale x 8 x i64> @splice_nxv8i64_offset_zero(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
1487 ; CHECK-LABEL: splice_nxv8i64_offset_zero:
1490 %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 0)
1491 ret <vscale x 8 x i64> %res
1494 define <vscale x 8 x i64> @splice_nxv8i64_offset_negone(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
1495 ; CHECK-LABEL: splice_nxv8i64_offset_negone:
1497 ; CHECK-NEXT: csrr a0, vlenb
1498 ; CHECK-NEXT: addi a0, a0, -1
1499 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
1500 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1501 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1502 ; CHECK-NEXT: vslideup.vi v8, v16, 1
1504 %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -1)
1505 ret <vscale x 8 x i64> %res
1508 define <vscale x 8 x i64> @splice_nxv8i64_offset_min(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
1509 ; CHECK-LABEL: splice_nxv8i64_offset_min:
1511 ; CHECK-NEXT: csrr a0, vlenb
1512 ; CHECK-NEXT: addi a0, a0, -16
1513 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
1514 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1515 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1516 ; CHECK-NEXT: vslideup.vi v8, v16, 16
1518 %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -16)
1519 ret <vscale x 8 x i64> %res
1522 define <vscale x 8 x i64> @splice_nxv8i64_offset_max(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
1523 ; CHECK-LABEL: splice_nxv8i64_offset_max:
1525 ; CHECK-NEXT: csrr a0, vlenb
1526 ; CHECK-NEXT: addi a0, a0, -15
1527 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1528 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
1529 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1530 ; CHECK-NEXT: vslideup.vx v8, v16, a0
1532 %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 15)
1533 ret <vscale x 8 x i64> %res
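; The floating-point splices below follow the same slidedown/slideup lowering
; as the integer cases above; only the SEW/LMUL in the vsetvli and the vlenb
; shift used to form vlmax change.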
1536 declare <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, i32)
1538 define <vscale x 1 x half> @splice_nxv1f16_offset_zero(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
1539 ; CHECK-LABEL: splice_nxv1f16_offset_zero:
1542 %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 0)
1543 ret <vscale x 1 x half> %res
1546 define <vscale x 1 x half> @splice_nxv1f16_offset_negone(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
1547 ; CHECK-LABEL: splice_nxv1f16_offset_negone:
1549 ; CHECK-NEXT: csrr a0, vlenb
1550 ; CHECK-NEXT: srli a0, a0, 3
1551 ; CHECK-NEXT: addi a0, a0, -1
1552 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
1553 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1554 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1556 %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -1)
1557 ret <vscale x 1 x half> %res
1560 define <vscale x 1 x half> @splice_nxv1f16_offset_min(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
1561 ; CHECK-LABEL: splice_nxv1f16_offset_min:
1563 ; CHECK-NEXT: csrr a0, vlenb
1564 ; CHECK-NEXT: srli a0, a0, 3
1565 ; CHECK-NEXT: addi a0, a0, -2
1566 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
1567 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1568 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
1569 ; CHECK-NEXT: vslideup.vi v8, v9, 2
1571 %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -2)
1572 ret <vscale x 1 x half> %res
1575 define <vscale x 1 x half> @splice_nxv1f16_offset_max(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
1576 ; CHECK-LABEL: splice_nxv1f16_offset_max:
1578 ; CHECK-NEXT: csrr a0, vlenb
1579 ; CHECK-NEXT: srli a0, a0, 3
1580 ; CHECK-NEXT: addi a0, a0, -1
1581 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1582 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1583 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
1584 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1586 %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 1)
1587 ret <vscale x 1 x half> %res
1590 declare <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, i32)
1592 define <vscale x 2 x half> @splice_nxv2f16_offset_zero(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
1593 ; CHECK-LABEL: splice_nxv2f16_offset_zero:
1596 %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 0)
1597 ret <vscale x 2 x half> %res
1600 define <vscale x 2 x half> @splice_nxv2f16_offset_negone(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
1601 ; CHECK-LABEL: splice_nxv2f16_offset_negone:
1603 ; CHECK-NEXT: csrr a0, vlenb
1604 ; CHECK-NEXT: srli a0, a0, 2
1605 ; CHECK-NEXT: addi a0, a0, -1
1606 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
1607 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1608 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1610 %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
1611 ret <vscale x 2 x half> %res
1614 define <vscale x 2 x half> @splice_nxv2f16_offset_min(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
1615 ; CHECK-LABEL: splice_nxv2f16_offset_min:
1617 ; CHECK-NEXT: csrr a0, vlenb
1618 ; CHECK-NEXT: srli a0, a0, 2
1619 ; CHECK-NEXT: addi a0, a0, -4
1620 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
1621 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1622 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
1623 ; CHECK-NEXT: vslideup.vi v8, v9, 4
1625 %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -4)
1626 ret <vscale x 2 x half> %res
1629 define <vscale x 2 x half> @splice_nxv2f16_offset_max(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
1630 ; CHECK-LABEL: splice_nxv2f16_offset_max:
1632 ; CHECK-NEXT: csrr a0, vlenb
1633 ; CHECK-NEXT: srli a0, a0, 2
1634 ; CHECK-NEXT: addi a0, a0, -3
1635 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1636 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
1637 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
1638 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1640 %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 3)
1641 ret <vscale x 2 x half> %res
1644 declare <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, i32)
1646 define <vscale x 4 x half> @splice_nxv4f16_offset_zero(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
1647 ; CHECK-LABEL: splice_nxv4f16_offset_zero:
1650 %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 0)
1651 ret <vscale x 4 x half> %res
1654 define <vscale x 4 x half> @splice_nxv4f16_offset_negone(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
1655 ; CHECK-LABEL: splice_nxv4f16_offset_negone:
1657 ; CHECK-NEXT: csrr a0, vlenb
1658 ; CHECK-NEXT: srli a0, a0, 1
1659 ; CHECK-NEXT: addi a0, a0, -1
1660 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
1661 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1662 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1664 %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
1665 ret <vscale x 4 x half> %res
1668 define <vscale x 4 x half> @splice_nxv4f16_offset_min(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
1669 ; CHECK-LABEL: splice_nxv4f16_offset_min:
1671 ; CHECK-NEXT: csrr a0, vlenb
1672 ; CHECK-NEXT: srli a0, a0, 1
1673 ; CHECK-NEXT: addi a0, a0, -8
1674 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1675 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1676 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
1677 ; CHECK-NEXT: vslideup.vi v8, v9, 8
1679 %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -8)
1680 ret <vscale x 4 x half> %res
1683 define <vscale x 4 x half> @splice_nxv4f16_offset_max(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
1684 ; CHECK-LABEL: splice_nxv4f16_offset_max:
1686 ; CHECK-NEXT: csrr a0, vlenb
1687 ; CHECK-NEXT: srli a0, a0, 1
1688 ; CHECK-NEXT: addi a0, a0, -7
1689 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1690 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
1691 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
1692 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1694 %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 7)
1695 ret <vscale x 4 x half> %res
1698 declare <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
1700 define <vscale x 8 x half> @splice_nxv8f16_offset_zero(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
1701 ; CHECK-LABEL: splice_nxv8f16_offset_zero:
1704 %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 0)
1705 ret <vscale x 8 x half> %res
1708 define <vscale x 8 x half> @splice_nxv8f16_offset_negone(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
1709 ; CHECK-LABEL: splice_nxv8f16_offset_negone:
1711 ; CHECK-NEXT: csrr a0, vlenb
1712 ; CHECK-NEXT: addi a0, a0, -1
1713 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
1714 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1715 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1716 ; CHECK-NEXT: vslideup.vi v8, v10, 1
1718 %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
1719 ret <vscale x 8 x half> %res
1722 define <vscale x 8 x half> @splice_nxv8f16_offset_min(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
1723 ; CHECK-LABEL: splice_nxv8f16_offset_min:
1725 ; CHECK-NEXT: csrr a0, vlenb
1726 ; CHECK-NEXT: addi a0, a0, -16
1727 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1728 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1729 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1730 ; CHECK-NEXT: vslideup.vi v8, v10, 16
1732 %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -16)
1733 ret <vscale x 8 x half> %res
1736 define <vscale x 8 x half> @splice_nxv8f16_offset_max(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
1737 ; CHECK-LABEL: splice_nxv8f16_offset_max:
1739 ; CHECK-NEXT: csrr a0, vlenb
1740 ; CHECK-NEXT: addi a0, a0, -15
1741 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1742 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
1743 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1744 ; CHECK-NEXT: vslideup.vx v8, v10, a0
1746 %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 15)
1747 ret <vscale x 8 x half> %res
1750 declare <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, i32)
1752 define <vscale x 16 x half> @splice_nxv16f16_offset_zero(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
1753 ; CHECK-LABEL: splice_nxv16f16_offset_zero:
1756 %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 0)
1757 ret <vscale x 16 x half> %res
1760 define <vscale x 16 x half> @splice_nxv16f16_offset_negone(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
1761 ; CHECK-LABEL: splice_nxv16f16_offset_negone:
1763 ; CHECK-NEXT: csrr a0, vlenb
1764 ; CHECK-NEXT: slli a0, a0, 1
1765 ; CHECK-NEXT: addi a0, a0, -1
1766 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
1767 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1768 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
1769 ; CHECK-NEXT: vslideup.vi v8, v12, 1
1771 %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -1)
1772 ret <vscale x 16 x half> %res
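; Once the offset magnitude no longer fits the 5-bit slide immediate (32, 63
; and 64 below), it is materialized with li and the .vx forms of the slides are
; used instead of .vi.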
1775 define <vscale x 16 x half> @splice_nxv16f16_offset_min(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
1776 ; CHECK-LABEL: splice_nxv16f16_offset_min:
1778 ; CHECK-NEXT: csrr a0, vlenb
1779 ; CHECK-NEXT: slli a0, a0, 1
1780 ; CHECK-NEXT: addi a0, a0, -32
1781 ; CHECK-NEXT: li a1, 32
1782 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1783 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1784 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
1785 ; CHECK-NEXT: vslideup.vx v8, v12, a1
1787 %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -32)
1788 ret <vscale x 16 x half> %res
1791 define <vscale x 16 x half> @splice_nxv16f16_offset_max(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
1792 ; CHECK-LABEL: splice_nxv16f16_offset_max:
1794 ; CHECK-NEXT: csrr a0, vlenb
1795 ; CHECK-NEXT: slli a0, a0, 1
1796 ; CHECK-NEXT: addi a0, a0, -31
1797 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1798 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
1799 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
1800 ; CHECK-NEXT: vslideup.vx v8, v12, a0
1802 %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 31)
1803 ret <vscale x 16 x half> %res
1806 declare <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, i32)
1808 define <vscale x 32 x half> @splice_nxv32f16_offset_zero(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
1809 ; CHECK-LABEL: splice_nxv32f16_offset_zero:
1812 %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 0)
1813 ret <vscale x 32 x half> %res
1816 define <vscale x 32 x half> @splice_nxv32f16_offset_negone(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
1817 ; CHECK-LABEL: splice_nxv32f16_offset_negone:
1819 ; CHECK-NEXT: csrr a0, vlenb
1820 ; CHECK-NEXT: slli a0, a0, 2
1821 ; CHECK-NEXT: addi a0, a0, -1
1822 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
1823 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1824 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1825 ; CHECK-NEXT: vslideup.vi v8, v16, 1
1827 %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -1)
1828 ret <vscale x 32 x half> %res
1831 define <vscale x 32 x half> @splice_nxv32f16_offset_min(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
1832 ; CHECK-LABEL: splice_nxv32f16_offset_min:
1834 ; CHECK-NEXT: csrr a0, vlenb
1835 ; CHECK-NEXT: slli a0, a0, 2
1836 ; CHECK-NEXT: addi a0, a0, -64
1837 ; CHECK-NEXT: li a1, 64
1838 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1839 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1840 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
1841 ; CHECK-NEXT: vslideup.vx v8, v16, a1
1843 %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -64)
1844 ret <vscale x 32 x half> %res
1847 define <vscale x 32 x half> @splice_nxv32f16_offset_max(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
1848 ; CHECK-LABEL: splice_nxv32f16_offset_max:
1850 ; CHECK-NEXT: csrr a0, vlenb
1851 ; CHECK-NEXT: slli a0, a0, 2
1852 ; CHECK-NEXT: addi a0, a0, -63
1853 ; CHECK-NEXT: li a1, 63
1854 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1855 ; CHECK-NEXT: vslidedown.vx v8, v8, a1
1856 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1857 ; CHECK-NEXT: vslideup.vx v8, v16, a0
1859 %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 63)
1860 ret <vscale x 32 x half> %res
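; For e32 the element count scales with LMUL: vlmax = vlenb >> 3 for mf2
; (nxv1f32) up to vlenb << 1 for m8 (nxv16f32).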
1863 declare <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, i32)
1865 define <vscale x 1 x float> @splice_nxv1f32_offset_zero(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
1866 ; CHECK-LABEL: splice_nxv1f32_offset_zero:
1869 %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 0)
1870 ret <vscale x 1 x float> %res
1873 define <vscale x 1 x float> @splice_nxv1f32_offset_negone(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
1874 ; CHECK-LABEL: splice_nxv1f32_offset_negone:
1876 ; CHECK-NEXT: csrr a0, vlenb
1877 ; CHECK-NEXT: srli a0, a0, 3
1878 ; CHECK-NEXT: addi a0, a0, -1
1879 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1880 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1881 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1883 %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -1)
1884 ret <vscale x 1 x float> %res
1887 define <vscale x 1 x float> @splice_nxv1f32_offset_min(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
1888 ; CHECK-LABEL: splice_nxv1f32_offset_min:
1890 ; CHECK-NEXT: csrr a0, vlenb
1891 ; CHECK-NEXT: srli a0, a0, 3
1892 ; CHECK-NEXT: addi a0, a0, -2
1893 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1894 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1895 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
1896 ; CHECK-NEXT: vslideup.vi v8, v9, 2
1898 %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -2)
1899 ret <vscale x 1 x float> %res
1902 define <vscale x 1 x float> @splice_nxv1f32_offset_max(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
1903 ; CHECK-LABEL: splice_nxv1f32_offset_max:
1905 ; CHECK-NEXT: csrr a0, vlenb
1906 ; CHECK-NEXT: srli a0, a0, 3
1907 ; CHECK-NEXT: addi a0, a0, -1
1908 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1909 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1910 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1911 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1913 %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 1)
1914 ret <vscale x 1 x float> %res
1917 declare <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32)
1919 define <vscale x 2 x float> @splice_nxv2f32_offset_zero(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
1920 ; CHECK-LABEL: splice_nxv2f32_offset_zero:
1923 %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 0)
1924 ret <vscale x 2 x float> %res
1927 define <vscale x 2 x float> @splice_nxv2f32_offset_negone(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
1928 ; CHECK-LABEL: splice_nxv2f32_offset_negone:
1930 ; CHECK-NEXT: csrr a0, vlenb
1931 ; CHECK-NEXT: srli a0, a0, 2
1932 ; CHECK-NEXT: addi a0, a0, -1
1933 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1934 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1935 ; CHECK-NEXT: vslideup.vi v8, v9, 1
1937 %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
1938 ret <vscale x 2 x float> %res
1941 define <vscale x 2 x float> @splice_nxv2f32_offset_min(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
1942 ; CHECK-LABEL: splice_nxv2f32_offset_min:
1944 ; CHECK-NEXT: csrr a0, vlenb
1945 ; CHECK-NEXT: srli a0, a0, 2
1946 ; CHECK-NEXT: addi a0, a0, -4
1947 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1948 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1949 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
1950 ; CHECK-NEXT: vslideup.vi v8, v9, 4
1952 %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -4)
1953 ret <vscale x 2 x float> %res
1956 define <vscale x 2 x float> @splice_nxv2f32_offset_max(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
1957 ; CHECK-LABEL: splice_nxv2f32_offset_max:
1959 ; CHECK-NEXT: csrr a0, vlenb
1960 ; CHECK-NEXT: srli a0, a0, 2
1961 ; CHECK-NEXT: addi a0, a0, -3
1962 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1963 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
1964 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1965 ; CHECK-NEXT: vslideup.vx v8, v9, a0
1967 %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 3)
1968 ret <vscale x 2 x float> %res
1971 declare <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
1973 define <vscale x 4 x float> @splice_nxv4f32_offset_zero(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
1974 ; CHECK-LABEL: splice_nxv4f32_offset_zero:
1977 %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 0)
1978 ret <vscale x 4 x float> %res
1981 define <vscale x 4 x float> @splice_nxv4f32_offset_negone(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
1982 ; CHECK-LABEL: splice_nxv4f32_offset_negone:
1984 ; CHECK-NEXT: csrr a0, vlenb
1985 ; CHECK-NEXT: srli a0, a0, 1
1986 ; CHECK-NEXT: addi a0, a0, -1
1987 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
1988 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1989 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
1990 ; CHECK-NEXT: vslideup.vi v8, v10, 1
1992 %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
1993 ret <vscale x 4 x float> %res
1996 define <vscale x 4 x float> @splice_nxv4f32_offset_min(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
1997 ; CHECK-LABEL: splice_nxv4f32_offset_min:
1999 ; CHECK-NEXT: csrr a0, vlenb
2000 ; CHECK-NEXT: srli a0, a0, 1
2001 ; CHECK-NEXT: addi a0, a0, -8
2002 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
2003 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2004 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
2005 ; CHECK-NEXT: vslideup.vi v8, v10, 8
2007 %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -8)
2008 ret <vscale x 4 x float> %res
2011 define <vscale x 4 x float> @splice_nxv4f32_offset_max(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
2012 ; CHECK-LABEL: splice_nxv4f32_offset_max:
2014 ; CHECK-NEXT: csrr a0, vlenb
2015 ; CHECK-NEXT: srli a0, a0, 1
2016 ; CHECK-NEXT: addi a0, a0, -7
2017 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2018 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
2019 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
2020 ; CHECK-NEXT: vslideup.vx v8, v10, a0
2022 %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 7)
2023 ret <vscale x 4 x float> %res
2026 declare <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, i32)
2028 define <vscale x 8 x float> @splice_nxv8f32_offset_zero(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
2029 ; CHECK-LABEL: splice_nxv8f32_offset_zero:
2032 %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 0)
2033 ret <vscale x 8 x float> %res
2036 define <vscale x 8 x float> @splice_nxv8f32_offset_negone(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
2037 ; CHECK-LABEL: splice_nxv8f32_offset_negone:
2039 ; CHECK-NEXT: csrr a0, vlenb
2040 ; CHECK-NEXT: addi a0, a0, -1
2041 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
2042 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2043 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
2044 ; CHECK-NEXT: vslideup.vi v8, v12, 1
2046 %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -1)
2047 ret <vscale x 8 x float> %res
2050 define <vscale x 8 x float> @splice_nxv8f32_offset_min(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
2051 ; CHECK-LABEL: splice_nxv8f32_offset_min:
2053 ; CHECK-NEXT: csrr a0, vlenb
2054 ; CHECK-NEXT: addi a0, a0, -16
2055 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
2056 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2057 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
2058 ; CHECK-NEXT: vslideup.vi v8, v12, 16
2060 %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -16)
2061 ret <vscale x 8 x float> %res
2064 define <vscale x 8 x float> @splice_nxv8f32_offset_max(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
2065 ; CHECK-LABEL: splice_nxv8f32_offset_max:
2067 ; CHECK-NEXT: csrr a0, vlenb
2068 ; CHECK-NEXT: addi a0, a0, -15
2069 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2070 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
2071 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
2072 ; CHECK-NEXT: vslideup.vx v8, v12, a0
2074 %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 15)
2075 ret <vscale x 8 x float> %res
2078 declare <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, i32)
2080 define <vscale x 16 x float> @splice_nxv16f32_offset_zero(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
2081 ; CHECK-LABEL: splice_nxv16f32_offset_zero:
2084 %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 0)
2085 ret <vscale x 16 x float> %res
2088 define <vscale x 16 x float> @splice_nxv16f32_offset_negone(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
2089 ; CHECK-LABEL: splice_nxv16f32_offset_negone:
2091 ; CHECK-NEXT: csrr a0, vlenb
2092 ; CHECK-NEXT: slli a0, a0, 1
2093 ; CHECK-NEXT: addi a0, a0, -1
2094 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
2095 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2096 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
2097 ; CHECK-NEXT: vslideup.vi v8, v16, 1
2099 %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -1)
2100 ret <vscale x 16 x float> %res
2103 define <vscale x 16 x float> @splice_nxv16f32_offset_min(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
2104 ; CHECK-LABEL: splice_nxv16f32_offset_min:
2106 ; CHECK-NEXT: csrr a0, vlenb
2107 ; CHECK-NEXT: slli a0, a0, 1
2108 ; CHECK-NEXT: addi a0, a0, -32
2109 ; CHECK-NEXT: li a1, 32
2110 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2111 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2112 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
2113 ; CHECK-NEXT: vslideup.vx v8, v16, a1
2115 %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -32)
2116 ret <vscale x 16 x float> %res
2119 define <vscale x 16 x float> @splice_nxv16f32_offset_max(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
2120 ; CHECK-LABEL: splice_nxv16f32_offset_max:
2122 ; CHECK-NEXT: csrr a0, vlenb
2123 ; CHECK-NEXT: slli a0, a0, 1
2124 ; CHECK-NEXT: addi a0, a0, -31
2125 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2126 ; CHECK-NEXT: vslidedown.vi v8, v8, 31
2127 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
2128 ; CHECK-NEXT: vslideup.vx v8, v16, a0
2130 %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 31)
2131 ret <vscale x 16 x float> %res
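; Likewise for e64: vlmax = vlenb >> 3 for m1 (nxv1f64) and vlenb itself (no
; shift) for m8 (nxv8f64).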
2134 declare <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i32)
2136 define <vscale x 1 x double> @splice_nxv1f64_offset_zero(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
2137 ; CHECK-LABEL: splice_nxv1f64_offset_zero:
2140 %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 0)
2141 ret <vscale x 1 x double> %res
2144 define <vscale x 1 x double> @splice_nxv1f64_offset_negone(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
2145 ; CHECK-LABEL: splice_nxv1f64_offset_negone:
2147 ; CHECK-NEXT: csrr a0, vlenb
2148 ; CHECK-NEXT: srli a0, a0, 3
2149 ; CHECK-NEXT: addi a0, a0, -1
2150 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
2151 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2152 ; CHECK-NEXT: vslideup.vi v8, v9, 1
2154 %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -1)
2155 ret <vscale x 1 x double> %res
2158 define <vscale x 1 x double> @splice_nxv1f64_offset_min(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
2159 ; CHECK-LABEL: splice_nxv1f64_offset_min:
2161 ; CHECK-NEXT: csrr a0, vlenb
2162 ; CHECK-NEXT: srli a0, a0, 3
2163 ; CHECK-NEXT: addi a0, a0, -2
2164 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
2165 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2166 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
2167 ; CHECK-NEXT: vslideup.vi v8, v9, 2
2169 %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -2)
2170 ret <vscale x 1 x double> %res
2173 define <vscale x 1 x double> @splice_nxv1f64_offset_max(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
2174 ; CHECK-LABEL: splice_nxv1f64_offset_max:
2176 ; CHECK-NEXT: csrr a0, vlenb
2177 ; CHECK-NEXT: srli a0, a0, 3
2178 ; CHECK-NEXT: addi a0, a0, -1
2179 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2180 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
2181 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
2182 ; CHECK-NEXT: vslideup.vx v8, v9, a0
2184 %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 1)
2185 ret <vscale x 1 x double> %res
2188 declare <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)
2190 define <vscale x 2 x double> @splice_nxv2f64_offset_zero(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
2191 ; CHECK-LABEL: splice_nxv2f64_offset_zero:
2194 %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 0)
2195 ret <vscale x 2 x double> %res
2198 define <vscale x 2 x double> @splice_nxv2f64_offset_negone(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
2199 ; CHECK-LABEL: splice_nxv2f64_offset_negone:
2201 ; CHECK-NEXT: csrr a0, vlenb
2202 ; CHECK-NEXT: srli a0, a0, 2
2203 ; CHECK-NEXT: addi a0, a0, -1
2204 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
2205 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2206 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
2207 ; CHECK-NEXT: vslideup.vi v8, v10, 1
2209 %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
2210 ret <vscale x 2 x double> %res
2213 define <vscale x 2 x double> @splice_nxv2f64_offset_min(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
2214 ; CHECK-LABEL: splice_nxv2f64_offset_min:
2216 ; CHECK-NEXT: csrr a0, vlenb
2217 ; CHECK-NEXT: srli a0, a0, 2
2218 ; CHECK-NEXT: addi a0, a0, -4
2219 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
2220 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2221 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
2222 ; CHECK-NEXT: vslideup.vi v8, v10, 4
2224 %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -4)
2225 ret <vscale x 2 x double> %res
2228 define <vscale x 2 x double> @splice_nxv2f64_offset_max(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
2229 ; CHECK-LABEL: splice_nxv2f64_offset_max:
2231 ; CHECK-NEXT: csrr a0, vlenb
2232 ; CHECK-NEXT: srli a0, a0, 2
2233 ; CHECK-NEXT: addi a0, a0, -3
2234 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2235 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
2236 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
2237 ; CHECK-NEXT: vslideup.vx v8, v10, a0
2239 %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 3)
2240 ret <vscale x 2 x double> %res
2243 declare <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, i32)
2245 define <vscale x 4 x double> @splice_nxv4f64_offset_zero(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
2246 ; CHECK-LABEL: splice_nxv4f64_offset_zero:
2249 %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 0)
2250 ret <vscale x 4 x double> %res
2253 define <vscale x 4 x double> @splice_nxv4f64_offset_negone(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
2254 ; CHECK-LABEL: splice_nxv4f64_offset_negone:
2256 ; CHECK-NEXT: csrr a0, vlenb
2257 ; CHECK-NEXT: srli a0, a0, 1
2258 ; CHECK-NEXT: addi a0, a0, -1
2259 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
2260 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2261 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
2262 ; CHECK-NEXT: vslideup.vi v8, v12, 1
2264 %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -1)
2265 ret <vscale x 4 x double> %res
2268 define <vscale x 4 x double> @splice_nxv4f64_offset_min(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
2269 ; CHECK-LABEL: splice_nxv4f64_offset_min:
2271 ; CHECK-NEXT: csrr a0, vlenb
2272 ; CHECK-NEXT: srli a0, a0, 1
2273 ; CHECK-NEXT: addi a0, a0, -8
2274 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
2275 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2276 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
2277 ; CHECK-NEXT: vslideup.vi v8, v12, 8
2279 %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -8)
2280 ret <vscale x 4 x double> %res
2283 define <vscale x 4 x double> @splice_nxv4f64_offset_max(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
2284 ; CHECK-LABEL: splice_nxv4f64_offset_max:
2286 ; CHECK-NEXT: csrr a0, vlenb
2287 ; CHECK-NEXT: srli a0, a0, 1
2288 ; CHECK-NEXT: addi a0, a0, -7
2289 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2290 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
2291 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
2292 ; CHECK-NEXT: vslideup.vx v8, v12, a0
2294 %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 7)
2295 ret <vscale x 4 x double> %res
2298 declare <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, i32)
2300 define <vscale x 8 x double> @splice_nxv8f64_offset_zero(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
2301 ; CHECK-LABEL: splice_nxv8f64_offset_zero:
2304 %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 0)
2305 ret <vscale x 8 x double> %res
2308 define <vscale x 8 x double> @splice_nxv8f64_offset_negone(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
2309 ; CHECK-LABEL: splice_nxv8f64_offset_negone:
2311 ; CHECK-NEXT: csrr a0, vlenb
2312 ; CHECK-NEXT: addi a0, a0, -1
2313 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
2314 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2315 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2316 ; CHECK-NEXT: vslideup.vi v8, v16, 1
2318 %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -1)
2319 ret <vscale x 8 x double> %res
2322 define <vscale x 8 x double> @splice_nxv8f64_offset_min(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
2323 ; CHECK-LABEL: splice_nxv8f64_offset_min:
2325 ; CHECK-NEXT: csrr a0, vlenb
2326 ; CHECK-NEXT: addi a0, a0, -16
2327 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
2328 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
2329 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2330 ; CHECK-NEXT: vslideup.vi v8, v16, 16
2332 %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -16)
2333 ret <vscale x 8 x double> %res
2336 define <vscale x 8 x double> @splice_nxv8f64_offset_max(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
2337 ; CHECK-LABEL: splice_nxv8f64_offset_max:
2339 ; CHECK-NEXT: csrr a0, vlenb
2340 ; CHECK-NEXT: addi a0, a0, -15
2341 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2342 ; CHECK-NEXT: vslidedown.vi v8, v8, 15
2343 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2344 ; CHECK-NEXT: vslideup.vx v8, v16, a0
2346 %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 15)
2347 ret <vscale x 8 x double> %res
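; vscale_range(2,0) guarantees vscale >= 2; the offset_min/offset_max tests
; above use the extreme offsets implied by that minimum (-vlmax and vlmax - 1
; for the smallest legal vlmax of each type).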
2350 attributes #0 = { vscale_range(2,0) }