; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s

; Integers

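; Mask (i1) vectors have no segmented store form, so the interleave below is
; done by widening both masks to i8 vectors, interleaving them with the
; vwaddu.vv/vwmaccu.vx idiom, converting back to masks with vmsne.vi,
; concatenating the halves with vslideup.vx and storing the result with vsm.v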
define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
; CHECK-NEXT: vwaddu.vv v16, v8, v12
; CHECK-NEXT: li a1, -1
; CHECK-NEXT: vwmaccu.vx v16, a1, v12
; CHECK-NEXT: vmsne.vi v8, v18, 0
; CHECK-NEXT: vmsne.vi v9, v16, 0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: add a2, a1, a1
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vsm.v v9, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  store <vscale x 32 x i1> %res, ptr %p
  ret void
}

; Shouldn't be lowered to vsseg because it's unaligned
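; With only 1-byte alignment the interleave is instead done in registers
; (vwaddu.vv + vwmaccu.vx) and written out with a whole-register vs4r.v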
define void @vector_interleave_store_nxv16i16_nxv8i16_align1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16_align1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v10
; CHECK-NEXT: li a1, -1
; CHECK-NEXT: vwmaccu.vx v12, a1, v10
; CHECK-NEXT: vs4r.v v12, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  store <vscale x 16 x i16> %res, ptr %p, align 1
  ret void
}

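; With natural alignment, an interleave2 feeding a store is folded into a
; single segmented store (vsseg2eXX), as in the cases below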
define void @vector_interleave_store_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  store <vscale x 16 x i16> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv8i32_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  store <vscale x 8 x i32> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv4i64_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  store <vscale x 4 x i64> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv8i64_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv8i64_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
  store <vscale x 8 x i64> %res, ptr %p
  ret void
}

; This shouldn't be lowered to a vsseg because EMUL * NFIELDS would exceed the limit of 8
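; The halves are instead interleaved with two vrgatherei16.vv gathers (with
; spill slots for the temporaries) and written back as two whole-register
; vs8r.v stores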
define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv16i64_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
; CHECK-NEXT: vand.vi v26, v24, 1
; CHECK-NEXT: vmsne.vi v28, v26, 0
; CHECK-NEXT: vsrl.vi v24, v24, 1
; CHECK-NEXT: vmv1r.v v0, v28
; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
  store <vscale x 16 x i64> %res, ptr %p
  ret void
}

declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

; Floats

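; Floating-point element types follow the same segmented-store lowering as the
; integer cases above; only the SEW/LMUL in the vsetvli changes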
define void @vector_interleave_store_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv4f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  store <vscale x 4 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv8f16_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  store <vscale x 8 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv4f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  store <vscale x 4 x float> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv16f16_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  store <vscale x 16 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv8f32_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  store <vscale x 8 x float> %res, ptr %p
  ret void
}

define void @vector_interleave_store_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv4f64_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
  %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  store <vscale x 4 x double> %res, ptr %p
  ret void
}

declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)