; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB
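
; This file checks lowering of the llvm.vector.interleave2 intrinsic on
; scalable vector types. The CHECK prefix covers the base V codegen; the
; ZVBB prefix covers codegen with the Zvbb extension available, where vwsll
; replaces part of the widening-arithmetic interleave idiom.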

; Integers

define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vwaddu.vv v16, v8, v12
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v16, a0, v12
; CHECK-NEXT:    vmsne.vi v8, v18, 0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv1r.v v9, v0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; ZVBB-NEXT:    vmv.v.i v10, 0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v10, v10, 1, v0
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    li a0, 1
; ZVBB-NEXT:    vmv1r.v v0, v9
; ZVBB-NEXT:    vwaddu.wx v12, v12, a0, v0.t
; ZVBB-NEXT:    vmsne.vi v8, v14, 0
; ZVBB-NEXT:    vmsne.vi v0, v12, 0
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  ret <vscale x 32 x i1> %res
}
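
; Mask (i1) interleaves have no direct instruction: each mask is first
; expanded to 0/1 bytes with vmerge.vim, the bytes are interleaved with the
; widening idiom (under Zvbb one operand is shifted into the high byte with
; vwsll.vi and the other mask is applied as a masked widening add), the
; result is converted back to a mask with vmsne.vi, and the two half-width
; mask registers are packed into one with vslideup.vx.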

define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv32i8_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 32 x i8> %res
}
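
; For element types with a double-width SEW available, the interleave is a
; single widening computation: vwaddu.vv gives zext(a) + zext(b) and
; vwmaccu.vx with -1 adds (2^SEW - 1) * zext(b), so each double-width lane
; ends up with a[i] in its low half and b[i] in its high half, which is
; exactly the interleaved layout when reread at the original SEW. With Zvbb
; the same layout comes from vwsll (b shifted into the high half) plus
; vwaddu.wv (a added into the low half).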

define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv16i16_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 16 x i16> %res
}

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 8 x i32> %res
}
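
; For e32 the required shift amount is 32, which does not fit the 5-bit
; immediate of vwsll.vi, so the ZVBB path materializes it in a register and
; uses vwsll.vx instead.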

define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i64> %res
}
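
; There is no SEW wider than e64, so the i64 interleave cannot use the
; widening trick. The two sources are adjacent halves of one register group
; and vrgatherei16.vv gathers from it: even result lanes use index i/2 and
; odd lanes use i/2 plus the element offset of %b within the group (derived
; from vlenb and added under the odd-lane mask). Zvbb changes nothing here.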

declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v8, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v24, a0, v16
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v12, v20
; CHECK-NEXT:    vwmaccu.vx v24, a0, v20
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v24, 0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmv.v.i v24, 0
; ZVBB-NEXT:    vmerge.vim v16, v24, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v8, v24, 1, v0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v8, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v16
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v0, v24, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v12, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v20
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v8, v24, 0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i1> @llvm.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
  ret <vscale x 128 x i1> %res
}
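
; When the interleaved result needs more than one m8 register group, the
; operands are split and interleaved half at a time, producing two results
; (returned in v8 and v16, or as the two mask registers v0 and v8 for the
; i1 case, where each half is compared back with vmsne.vi).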

define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 8
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i8> @llvm.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
  ret <vscale x 128 x i8> %res
}

define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x i16> @llvm.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
  ret <vscale x 64 x i16> %res
}

define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v8, v16, a0
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
  ret <vscale x 32 x i32> %res
}

define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv16i64_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
  ret <vscale x 16 x i64> %res
}
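
; The nxv16i64 case uses the same vrgatherei16 scheme at m8, but register
; pressure forces one m8 temporary to be spilled to a stack slot of
; 8 * vlenb bytes (hence the frame setup and the .cfi_escape expression)
; and reloaded before the results are placed in v8 and v16.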

declare <vscale x 128 x i1> @llvm.vector.interleave2.nxv128i1(<vscale x 64 x i1>, <vscale x 64 x i1>)
declare <vscale x 128 x i8> @llvm.vector.interleave2.nxv128i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
declare <vscale x 64 x i16> @llvm.vector.interleave2.nxv64i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
declare <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

; Floats
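
; The floating-point interleaves below lower to the same data movement as
; their integer counterparts (vwaddu/vwmaccu, vwsll, or vrgatherei16), since
; interleaving only rearranges element storage.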

define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv4f16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; ZVBB-NEXT:    vslideup.vx v10, v8, a0
; ZVBB-NEXT:    vmv.v.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv8f16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv4f32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv16f16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv8f32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 4 x double> %res
}

declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)

define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
  ret <vscale x 64 x half> %res
}

define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v8, v16, a0
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
  ret <vscale x 32 x float> %res
}

define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv16f64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
  ret <vscale x 16 x double> %res
}

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vzext.vf2 v12, v8
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
  ret <vscale x 8 x i32> %res
}
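
; interleave2(%a, poison) only has to define the even result lanes, so it
; lowers to a plain vzext.vf2: each i32 element of %a lands in the low
; (even) half of an i64 lane, and the zeroed odd lane is fine because that
; lane is poison anyway.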

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsll.vx v8, v12, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v8, a0
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
  ret <vscale x 8 x i32> %res
}
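
; interleave2(poison, %a) places %a in the odd lanes: the base V code
; zero-extends and then shifts left by 32 at e64, while Zvbb folds both
; steps into a single vwsll.vx.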

declare <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
declare <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
declare <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)