; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s

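; Tests of llvm.vp.trunc on fixed-length vectors. Each power-of-two narrowing
; step is expected to lower to a single vnsrl.wi under a VL taken from the
; %vl operand; wider sources are split and recombined below.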
declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32)

define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i7_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i7> %v
}

declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

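; A 128-element source needs two register groups, so the truncation is split:
; the low half runs at min(%vl, 64), clamped with a compare and branch, and
; the high half at max(%vl - 64, 0), computed branchlessly with the
; sltu/addi/and idiom, before a vslideup.vx rejoins the halves.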
declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)

define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v12, v0, 8
; CHECK-NEXT:    mv a2, a0
; CHECK-NEXT:    bltu a0, a1, .LBB4_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:  .LBB4_2:
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT:    addi a2, a0, -64
; CHECK-NEXT:    sltu a0, a0, a2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a2
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT:    li a0, 128
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v24, a1
; CHECK-NEXT:    ret
  %v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
  ret <128 x i7> %v
}

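; Truncation across more than one power-of-two element width is done in
; stages, one vnsrl.wi per halving (i32 -> i16 -> i8), all under the same VL.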
declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i8> %v
}

define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i8_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i8> %v
}

declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32)

define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i16> %v
}

define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i16> %v
}

declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i16> %v
}

define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i16_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i16> %v
}

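; A non-power-of-two element count is legalized to the next power of two, so
; <15 x i64> is handled with the same m4/m2 sequence a 16-element source
; would get.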
declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32)

define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v15i16_v15i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
  ret <15 x i16> %v
}

declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i32> %v
}

define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i32_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
  ret <2 x i32> %v
}

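; <128 x i64> exceeds the vector register file: part of the source arrives in
; v8/v16 with the rest loaded through a1, the result is stored out through
; a0, and register pressure forces vlenb-scaled stack spills and reloads
; around each 16-element vnsrl.wi piece.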
declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32)

define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i32_v128i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 6
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 64 * vlenb
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 5
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    li a3, 40
; CHECK-NEXT:    mul a2, a2, a3
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v25, v0, 8
; CHECK-NEXT:    addi a2, a1, 512
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v27, v25, 4
; CHECK-NEXT:    addi a3, a1, 640
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a3)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v27, 2
; CHECK-NEXT:    addi a3, a7, -64
; CHECK-NEXT:    sltu a4, a7, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a4, a4, a3
; CHECK-NEXT:    addi a3, a4, -32
; CHECK-NEXT:    sltu a5, a4, a3
; CHECK-NEXT:    addi a5, a5, -1
; CHECK-NEXT:    and a3, a5, a3
; CHECK-NEXT:    addi a5, a3, -16
; CHECK-NEXT:    sltu a6, a3, a5
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a5, a6, a5
; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    li a6, 24
; CHECK-NEXT:    mul a5, a5, a6
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a2)
; CHECK-NEXT:    addi a5, a1, 128
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v26, v7, 4
; CHECK-NEXT:    bltu a3, a2, .LBB16_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a3, 16
; CHECK-NEXT:  .LBB16_2:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a5)
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    li a6, 48
; CHECK-NEXT:    mul a5, a5, a6
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v28, v26, 2
; CHECK-NEXT:    li a5, 64
; CHECK-NEXT:    vmv1r.v v0, v27
; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    li a6, 56
; CHECK-NEXT:    mul a3, a3, a6
; CHECK-NEXT:    add a3, sp, a3
; CHECK-NEXT:    addi a3, a3, 16
; CHECK-NEXT:    vs8r.v v16, (a3) # Unknown-size Folded Spill
; CHECK-NEXT:    mv a6, a7
; CHECK-NEXT:    bltu a7, a5, .LBB16_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    li a6, 64
; CHECK-NEXT:  .LBB16_4:
; CHECK-NEXT:    addi a5, a1, 384
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    csrr t0, vlenb
; CHECK-NEXT:    slli t0, t0, 3
; CHECK-NEXT:    add t0, sp, t0
; CHECK-NEXT:    addi t0, t0, 16
; CHECK-NEXT:    vs8r.v v8, (t0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi t0, a6, -32
; CHECK-NEXT:    sltu a6, a6, t0
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a6, a6, t0
; CHECK-NEXT:    addi t0, a6, -16
; CHECK-NEXT:    sltu t1, a6, t0
; CHECK-NEXT:    addi t1, t1, -1
; CHECK-NEXT:    and t0, t1, t0
; CHECK-NEXT:    vmv1r.v v0, v28
; CHECK-NEXT:    csrr t1, vlenb
; CHECK-NEXT:    li t2, 48
; CHECK-NEXT:    mul t1, t1, t2
; CHECK-NEXT:    add t1, sp, t1
; CHECK-NEXT:    addi t1, t1, 16
; CHECK-NEXT:    vl8r.v v16, (t1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, t0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT:    csrr t0, vlenb
; CHECK-NEXT:    slli t0, t0, 4
; CHECK-NEXT:    add t0, sp, t0
; CHECK-NEXT:    addi t0, t0, 16
; CHECK-NEXT:    vs8r.v v8, (t0) # Unknown-size Folded Spill
; CHECK-NEXT:    bltu a6, a2, .LBB16_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    li a6, 16
; CHECK-NEXT:  .LBB16_6:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a5)
; CHECK-NEXT:    addi a5, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a1, 256
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v27, v25, 2
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    slli a5, a5, 3
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    li a6, 48
; CHECK-NEXT:    mul a5, a5, a6
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    mv a5, a4
; CHECK-NEXT:    bltu a4, a3, .LBB16_8
; CHECK-NEXT:  # %bb.7:
; CHECK-NEXT:    li a5, 32
; CHECK-NEXT:  .LBB16_8:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a5, -16
; CHECK-NEXT:    sltu a5, a5, a1
; CHECK-NEXT:    addi a5, a5, -1
; CHECK-NEXT:    and a1, a5, a1
; CHECK-NEXT:    vmv1r.v v0, v27
; CHECK-NEXT:    addi a5, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT:    bltu a4, a2, .LBB16_10
; CHECK-NEXT:  # %bb.9:
; CHECK-NEXT:    li a4, 16
; CHECK-NEXT:  .LBB16_10:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v6, v7, 2
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a4, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    mv a1, a7
; CHECK-NEXT:    bltu a7, a3, .LBB16_12
; CHECK-NEXT:  # %bb.11:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:  .LBB16_12:
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 24
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv4r.v v24, v8
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 56
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v24, 16
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 56
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    slli a4, a4, 4
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv4r.v v24, v8
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 48
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT:    vslideup.vi v8, v24, 16
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 48
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v8, v0
; CHECK-NEXT:    vslideup.vi v8, v16, 16
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    li a5, 24
; CHECK-NEXT:    mul a4, a4, a5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a4, a1, -16
; CHECK-NEXT:    sltu a1, a1, a4
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a1, a1, a4
; CHECK-NEXT:    vmv1r.v v0, v6
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    slli a4, a4, 5
; CHECK-NEXT:    add a4, sp, a4
; CHECK-NEXT:    addi a4, a4, 16
; CHECK-NEXT:    vl8r.v v16, (a4) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT:    bltu a7, a2, .LBB16_14
; CHECK-NEXT:  # %bb.13:
; CHECK-NEXT:    li a7, 16
; CHECK-NEXT:  .LBB16_14:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a2, 40
; CHECK-NEXT:    mul a1, a1, a2
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vslideup.vi v16, v8, 16
; CHECK-NEXT:    vse32.v v16, (a0)
; CHECK-NEXT:    addi a1, a0, 256
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    li a3, 24
; CHECK-NEXT:    mul a2, a2, a3
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    li a3, 48
; CHECK-NEXT:    mul a2, a2, a3
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    addi a0, a0, 384
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a2, 56
; CHECK-NEXT:    mul a1, a1, a2
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 6
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl)
  ret <128 x i32> %v
}

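; <32 x i64> fits in two m8 register groups, so the split needs no stack
; traffic: one masked vnsrl.wi per 16-element half, with the mask for the
; high half peeled off by vslidedown.vi, then a vslideup.vi to rejoin.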
declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)

define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v12, v0, 2
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB17_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB17_2:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v24, 16
; CHECK-NEXT:    ret
  %v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl)
  ret <32 x i32> %v
}

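; An i8 -> i7 truncation changes only how the high bits are interpreted, so
; no instructions should be emitted beyond the ret.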
declare <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8>, <2 x i1>, i32)

define <2 x i7> @vtrunc_v2i7_v2i8(<2 x i8> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i7_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8> %a, <2 x i1> %m, i32 %vl)
  ret <2 x i7> %v
}