; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+m | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+m | FileCheck %s

; Integers
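
; Masks have no segment-load form, so the nxv32i1 case below is lowered by
; widening the mask to a vector of i8 0/1 values (vmerge), deinterleaving the
; bytes with a pair of vnsrl extracts, and converting each half back into a
; mask with vmsne.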
define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i1_nxv32i1(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv16i1_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmerge.vim v14, v10, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vnsrl.wi v10, v12, 8
; CHECK-NEXT:    vmsne.vi v8, v10, 0
; CHECK-NEXT:    ret
  %vec = load <vscale x 32 x i1>, ptr %p
  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
  ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
}
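
; In the well-aligned cases below, (deinterleave2 (load %p)) is combined into
; a single vlseg2 segment load: field 0 (the even elements) lands in v8's
; register group and field 1 (the odd elements) in the group after it.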
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_load_nxv16i8_nxv32i8(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv16i8_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vlseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 32 x i8>, ptr %p
  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.experimental.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %vec)
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
}

; This shouldn't be lowered to vlseg because the load is only align 1, and
; vlseg2e16 needs the i16 elements to be naturally aligned.
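; It is instead lowered to a whole-register vl4r load followed by a pair of
; vnsrl extracts, which take the low (even elements) and high (odd elements)
; 16 bits of each 32-bit pair.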
define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16_align1(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16_align1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vnsrl.wi v10, v12, 16
; CHECK-NEXT:    ret
  %vec = load <vscale x 16 x i16>, ptr %p, align 1
  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
}

define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vlseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 16 x i16>, ptr %p
  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
}

define {<vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_load_nxv4i32_nxv8i32(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv4i32_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vlseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 8 x i32>, ptr %p
  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %vec)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %retval
}

define {<vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_load_nxv2i64_nxv4i64(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv2i64_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vlseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 4 x i64>, ptr %p
  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.experimental.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> %vec)
  ret {<vscale x 2 x i64>, <vscale x 2 x i64>} %retval
}

define {<vscale x 4 x i64>, <vscale x 4 x i64>} @vector_deinterleave_load_nxv4i64_nxv8i64(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv4i64_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vlseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 8 x i64>, ptr %p
  %retval = call {<vscale x 4 x i64>, <vscale x 4 x i64>} @llvm.experimental.vector.deinterleave2.nxv8i64(<vscale x 8 x i64> %vec)
  ret {<vscale x 4 x i64>, <vscale x 4 x i64>} %retval
}

; This shouldn't be lowered to a vlseg: with two m8 fields, EMUL * NFIELDS = 16,
; and segment loads are only legal when EMUL * NFIELDS <= 8 (the m4 case above,
; where EMUL * NFIELDS = 8, is still fine).
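; Instead, the two halves are loaded with vl8re64 and deinterleaved with
; vrgather: vid.v/vadd.vv build the even index sequence 0,2,4,... and
; vadd.vi derives the odd indices, while the stack spills cover the register
; pressure of keeping the loaded halves, indices, and results live at m8.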
define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i64_nxv16i64(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv8i64_nxv16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 5
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vl8re64.v v8, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a2, 24
; CHECK-NEXT:    mul a1, a1, a2
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v16, v8, v8
; CHECK-NEXT:    vrgather.vv v8, v0, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vadd.vi v8, v16, 1
; CHECK-NEXT:    vrgather.vv v16, v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vrgather.vv v24, v0, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv4r.v v28, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv4r.v v20, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 5
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %vec = load <vscale x 16 x i64>, ptr %p
  %retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.experimental.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
  ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
}

declare {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1>)
declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.experimental.vector.deinterleave2.nxv32i8(<vscale x 32 x i8>)
declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16>)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32>)
declare {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.experimental.vector.deinterleave2.nxv4i64(<vscale x 4 x i64>)
declare {<vscale x 4 x i64>, <vscale x 4 x i64>} @llvm.experimental.vector.deinterleave2.nxv8i64(<vscale x 8 x i64>)
declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.experimental.vector.deinterleave2.nxv16i64(<vscale x 16 x i64>)

; Floats

define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_load_nxv2f16_nxv4f16(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv2f16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vlseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 4 x half>, ptr %p
  %retval = call {<vscale x 2 x half>, <vscale x 2 x half>} @llvm.experimental.vector.deinterleave2.nxv4f16(<vscale x 4 x half> %vec)
  ret {<vscale x 2 x half>, <vscale x 2 x half>} %retval
}

define {<vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_load_nxv4f16_nxv8f16(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv4f16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vlseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 8 x half>, ptr %p
  %retval = call {<vscale x 4 x half>, <vscale x 4 x half>} @llvm.experimental.vector.deinterleave2.nxv8f16(<vscale x 8 x half> %vec)
  ret {<vscale x 4 x half>, <vscale x 4 x half>} %retval
}

define {<vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_load_nxv2f32_nxv4f32(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv2f32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vlseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 4 x float>, ptr %p
  %retval = call {<vscale x 2 x float>, <vscale x 2 x float>} @llvm.experimental.vector.deinterleave2.nxv4f32(<vscale x 4 x float> %vec)
  ret {<vscale x 2 x float>, <vscale x 2 x float>} %retval
}

define {<vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_load_nxv8f16_nxv16f16(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv8f16_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vlseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 16 x half>, ptr %p
  %retval = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.experimental.vector.deinterleave2.nxv16f16(<vscale x 16 x half> %vec)
  ret {<vscale x 8 x half>, <vscale x 8 x half>} %retval
}

define {<vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_load_nxv4f32_nxv8f32(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv4f32_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vlseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 8 x float>, ptr %p
  %retval = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.experimental.vector.deinterleave2.nxv8f32(<vscale x 8 x float> %vec)
  ret {<vscale x 4 x float>, <vscale x 4 x float>} %retval
}

define {<vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_load_nxv2f64_nxv4f64(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_nxv2f64_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vlseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
  %vec = load <vscale x 4 x double>, ptr %p
  %retval = call {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %vec)
  ret {<vscale x 2 x double>, <vscale x 2 x double>} %retval
}

declare {<vscale x 2 x half>, <vscale x 2 x half>} @llvm.experimental.vector.deinterleave2.nxv4f16(<vscale x 4 x half>)
declare {<vscale x 4 x half>, <vscale x 4 x half>} @llvm.experimental.vector.deinterleave2.nxv8f16(<vscale x 8 x half>)
declare {<vscale x 2 x float>, <vscale x 2 x float>} @llvm.experimental.vector.deinterleave2.nxv4f32(<vscale x 4 x float>)
declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.experimental.vector.deinterleave2.nxv16f16(<vscale x 16 x half>)
declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.experimental.vector.deinterleave2.nxv8f32(<vscale x 8 x float>)
declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)