; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
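
; vnclipu performs a narrowing fixed-point clip: it right-shifts a 2*SEW-wide
; unsigned source and saturates the result into SEW bits, with rounding
; controlled by the vxrm CSR. A sketch of the operand layout, inferred from
; the calls in this file (not normative):
;   unmasked: (passthru, wide source, shift, vxrm, vl)
;   masked:   (passthru, wide source, shift, mask, vxrm, vl, policy)
; Every call below passes vxrm = 0 (round-to-nearest-up), which is why the
; checks expect "csrwi vxrm, 0" ahead of each vnclipu instruction.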

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
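
; The .wx variants below take the shift amount in a scalar register (a0 in
; these checks), so the intrinsic's shift operand is an iXLen rather than a
; vector; the rest of the operand layout matches the sketch at the top.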

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
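
; The .wi variants below use a 5-bit immediate shift amount (9 in every
; test) and reuse the scalar-shift declarations above; the immediate is
; passed as a constant iXLen operand.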

define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}