; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+experimental-zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+experimental-zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB

; Integers
define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vwaddu.vv v16, v8, v12
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v16, a0, v12
; CHECK-NEXT:    vmsne.vi v8, v18, 0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv.v.i v10, 0
; ZVBB-NEXT:    vmerge.vim v12, v10, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v8, v10, 1, v0
; ZVBB-NEXT:    vwsll.vi v16, v8, 8
; ZVBB-NEXT:    vwaddu.wv v16, v16, v12
; ZVBB-NEXT:    vmsne.vi v8, v18, 0
; ZVBB-NEXT:    vmsne.vi v0, v16, 0
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.experimental.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  ret <vscale x 32 x i1> %res
}
define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv32i8_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.experimental.vector.interleave2.nxv32i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 32 x i8> %res
}
define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv16i16_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 16 x i16> %res
}
define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 8 x i32> %res
}
define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i64> %res
}
declare <vscale x 32 x i1> @llvm.experimental.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 32 x i8> @llvm.experimental.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v8, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v24, a0, v16
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v12, v20
; CHECK-NEXT:    vwmaccu.vx v24, a0, v20
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v24, 0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmv.v.i v24, 0
; ZVBB-NEXT:    vmerge.vim v16, v24, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v8, v24, 1, v0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v8, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v16
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v0, v24, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v12, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v20
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v8, v24, 0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
  ret <vscale x 128 x i1> %res
}
define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 8
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
  ret <vscale x 128 x i8> %res
}
define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
  ret <vscale x 64 x i16> %res
}
define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v24, v16, a0
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
  ret <vscale x 32 x i32> %res
}
define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv16i64_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
  ret <vscale x 16 x i64> %res
}
declare <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1>, <vscale x 64 x i1>)
declare <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
declare <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
declare <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

; Floats
define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv4f16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v10, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; ZVBB-NEXT:    vslideup.vx v10, v8, a0
; ZVBB-NEXT:    vmv1r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 4 x half> %res
}
define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv8f16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 8 x half> %res
}
define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv4f32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 4 x float> %res
}
define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv16f16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 16 x half> %res
}
define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv8f32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 8 x float> %res
}
define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 4 x double> %res
}
declare <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x half> @llvm.experimental.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 4 x float> @llvm.experimental.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
  ret <vscale x 64 x half> %res
}
define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v24, v16, a0
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
  ret <vscale x 32 x float> %res
}
define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv16f64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
  ret <vscale x 16 x double> %res
}
declare <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
declare <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
declare <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)