; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
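
; There is no vmerge for mask registers (SEW is at least 8), so i1 selects are
; expanded to mask arithmetic, result = (%b & %a) | (%c & ~%a), i.e. the
; vmandn.mm/vmand.mm/vmor.mm sequences checked below.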
declare <1 x i1> @llvm.vp.select.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32)

define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <1 x i1> @llvm.vp.select.v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 %evl)
  ret <1 x i1> %v
}

declare <2 x i1> @llvm.vp.select.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)

define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i1> @llvm.vp.select.v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 %evl)
  ret <2 x i1> %v
}

declare <4 x i1> @llvm.vp.select.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)

define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i1> @llvm.vp.select.v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 %evl)
  ret <4 x i1> %v
}

declare <8 x i1> @llvm.vp.select.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)

define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i1> @llvm.vp.select.v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 %evl)
  ret <8 x i1> %v
}

declare <16 x i1> @llvm.vp.select.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)

define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i1> @llvm.vp.select.v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 %evl)
  ret <16 x i1> %v
}
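
; i7 is an illegal element type; it is promoted to i8, so the select itself is
; still a plain e8 vmerge.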
declare <8 x i7> @llvm.vp.select.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32)

define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.select.v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 %evl)
  ret <8 x i7> %v
}

declare <2 x i8> @llvm.vp.select.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32)

define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.select.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32)

define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 %evl)
  ret <4 x i8> %v
}

declare <5 x i8> @llvm.vp.select.v5i8(<5 x i1>, <5 x i8>, <5 x i8>, i32)

define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v5i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <5 x i8> @llvm.vp.select.v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 %evl)
  ret <5 x i8> %v
}

declare <8 x i8> @llvm.vp.select.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32)

define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.select.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32)

define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 %evl)
  ret <16 x i8> %v
}

declare <256 x i8> @llvm.vp.select.v256i8(<256 x i1>, <256 x i8>, <256 x i8>, i32)
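
; The 256-element select is legalized by splitting into two 128-element halves
; (the widest legal fixed e8 vector at LMUL=8 with the minimum VLEN of 128).
; The high half's EVL is the saturating difference max(%evl - 128, 0), computed
; by the sltu/addi/and sequence; the low half's VL is capped at 128 by the
; bltu branch.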
define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a2, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v6, v8
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    addi a4, a1, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v24, (a0)
; CHECK-NEXT:    addi a0, a3, -128
; CHECK-NEXT:    vle8.v v8, (a4)
; CHECK-NEXT:    sltu a4, a3, a0
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a0, a4, a0
; CHECK-NEXT:    vmv1r.v v0, v6
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v24, v8, v24, v0
; CHECK-NEXT:    bltu a3, a2, .LBB11_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:  .LBB11_2:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 %evl)
  ret <256 x i8> %v
}
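
; The same splitting with a constant EVL of 129: the low half runs at VL=128
; (li a2, 128) and the high half needs only a single element (vsetivli zero, 1).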
define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c) {
; CHECK-LABEL: select_evl_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    li a3, 24
; CHECK-NEXT:    mul a2, a2, a3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a0, a1, 128
; CHECK-NEXT:    vle8.v v24, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v24, v24, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 129)
  ret <256 x i8> %v
}

declare <2 x i16> @llvm.vp.select.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32)

define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.select.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32)

define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)

define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.select.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32)

define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.select.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32)

define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.select.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32)

define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)

define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.select.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32)

define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.select.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32)

define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.select.v4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32)

define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.select.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32)

define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 %evl)
  ret <8 x i64> %v
}

declare <16 x i64> @llvm.vp.select.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32)

define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 %evl)
  ret <16 x i64> %v
}

declare <32 x i64> @llvm.vp.select.v32i64(<32 x i1>, <32 x i64>, <32 x i64>, i32)
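
; At e64 the widest legal fixed vector holds 16 elements, so v32i64 is split at
; 16. The high half's mask is extracted with vslidedown.vi v0, v0, 2 (the mask
; is slid down by two e8 elements, i.e. 16 bits).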
define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    vle64.v v24, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB25_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB25_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v0, 2
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 %evl)
  ret <32 x i64> %v
}
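
; Constant EVL of 17: the low half runs at VL=16 and the high half at VL=1, so
; no saturating-subtract sequence is needed.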
define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) {
; CHECK-LABEL: select_evl_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a2, 24
; CHECK-NEXT:    mul a1, a1, a2
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v24, v0, 2
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
  ret <32 x i64> %v
}
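
; Floating-point selects are pure data movement, so they use the same vmerge
; lowering as the integer cases; the zvfhmin RUN lines check that the half
; tests do not require full zvfh support.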
declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32)

define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32)

define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32)

define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32)

define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 %evl)
  ret <16 x half> %v
}

declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32)

define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32)

define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)

define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32)

define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 %evl)
  ret <16 x float> %v
}

declare <64 x float> @llvm.vp.select.v64f32(<64 x i1>, <64 x float>, <64 x float>, i32)
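
; v64f32 splits at 32 elements (the widest legal fixed e32 vector); the high
; half's mask is extracted with vslidedown.vi v0, v0, 4 (4 bytes = 32 mask bits).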
define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a3, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT:    addi a0, a2, -32
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v0, 4
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <64 x float> @llvm.vp.select.v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 %evl)
  ret <64 x float> %v
}

declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32)

define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32)

define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)

define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 %evl)
  ret <8 x double> %v
}

declare <16 x double> @llvm.vp.select.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32)

define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.select.v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 %evl)
  ret <16 x double> %v
}