; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s

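; vp.trunc lowers to RVV narrowing shifts: each halving of the element width
; is a vnsrl.wi by 0, with the VP mask applied as v0.t and the VP %vl
; installed via vsetvli.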
declare <vscale x 2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i7> @vtrunc_nxv2i7_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i7> %v
}

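; A non-byte-sized source type (i15) is legalized first; the resulting code
; matches the nxv2i16 case above.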
declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<vscale x 2 x i15>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i15(<vscale x 2 x i15> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<vscale x 2 x i15> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<vscale x 2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i8> %v
}

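; vnsrl can only halve the element width, so i32 -> i8 is done in two steps
; (i32 -> i16 -> i8), switching SEW between the two vnsrl.wi instructions.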
declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i8> %v
}

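; Likewise, i64 -> i8 takes three halving steps: i64 -> i32 -> i16 -> i8.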
declare <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i16> %v
}

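; The non-power-of-two nxv15 type is effectively split in two halves. The
; mask for the high half is extracted with vslidedown.vx, and its VL is the
; saturating difference usubsat(%vl, VLMAX) computed by the sltu/addi/and
; sequence; the low half uses min(%vl, VLMAX) via the bltu/mv branch.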
declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>, <vscale x 15 x i1>, i32)

define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB12_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB12_2:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 %vl)
  ret <vscale x 15 x i16> %v
}

declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<vscale x 2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
  ret <vscale x 2 x i32> %v
}

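; nxv32i32 spans multiple register groups, so the operation is split in half
; like the nxv15 case; the i7 result type is again handled as i8.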
declare <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB15_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB15_2:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i7> %v
}

declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB16_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB16_2:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i8> %v
}

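; nxv32i64 does not fit in the vector register file: half of the argument is
; passed indirectly and reloaded with vl8re64.v, and the in-register halves
; are spilled and reloaded around the four split truncates.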
declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vmv1r.v v1, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a3, a1, 3
; CHECK-NEXT:    srli a4, a1, 2
; CHECK-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v0, a4
; CHECK-NEXT:    slli a4, a1, 3
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vl8re64.v v8, (a4)
; CHECK-NEXT:    slli a4, a1, 1
; CHECK-NEXT:    sub a5, a2, a4
; CHECK-NEXT:    sltu a6, a2, a5
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a5, a6, a5
; CHECK-NEXT:    sub a6, a5, a1
; CHECK-NEXT:    sltu a7, a5, a6
; CHECK-NEXT:    addi a7, a7, -1
; CHECK-NEXT:    and a6, a7, a6
; CHECK-NEXT:    vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vslidedown.vx v0, v16, a3
; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT:    bltu a5, a1, .LBB17_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a5, a1
; CHECK-NEXT:  .LBB17_2:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v2, v1, a3
; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT:    bltu a2, a4, .LBB17_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    mv a2, a4
; CHECK-NEXT:  .LBB17_4:
; CHECK-NEXT:    sub a0, a2, a1
; CHECK-NEXT:    sltu a3, a2, a0
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a0, a3, a0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v2
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vnsrl.wi v28, v8, 0, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB17_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB17_6:
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v1
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vnsrl.wi v24, v8, 0, v0.t
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x i32> %v
}