1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
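; This file exercises the @llvm.riscv.vmslt (signed less-than) compare
; intrinsics in their vector-vector (vv), vector-scalar (vx) and immediate
; (vi) forms, unmasked and masked, over SEW e8/e16/e32/e64 and LMUL mf8
; through m4. The CHECK lines are autogenerated, so regenerate them with
; utils/update_llc_test_checks.py rather than editing them by hand.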
7 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
12 define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
13 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
14 ; CHECK: # %bb.0: # %entry
15 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
16 ; CHECK-NEXT: vmslt.vv v0, v8, v9
19 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
24 ret <vscale x 1 x i1> %a
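; In the masked tests the maskedoff operand (%0) arrives in v0, and the
; masked vmslt also needs its mask operand in v0, so the expected code
; shuffles values through temporaries (vmv1r.v / vmv.v.v) around the
; second compare.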
27 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
34 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
35 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
36 ; CHECK: # %bb.0: # %entry
37 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
38 ; CHECK-NEXT: vmslt.vv v8, v8, v9
39 ; CHECK-NEXT: vmv1r.v v11, v0
40 ; CHECK-NEXT: vmv1r.v v0, v8
41 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
42 ; CHECK-NEXT: vmv1r.v v0, v11
45 %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
49 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
53 <vscale x 1 x i1> %mask,
56 ret <vscale x 1 x i1> %a
59 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
64 define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
65 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
66 ; CHECK: # %bb.0: # %entry
67 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
68 ; CHECK-NEXT: vmslt.vv v0, v8, v9
71 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
76 ret <vscale x 2 x i1> %a
79 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
86 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
87 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
88 ; CHECK: # %bb.0: # %entry
89 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
90 ; CHECK-NEXT: vmslt.vv v8, v8, v9
91 ; CHECK-NEXT: vmv1r.v v11, v0
92 ; CHECK-NEXT: vmv1r.v v0, v8
93 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
94 ; CHECK-NEXT: vmv1r.v v0, v11
97 %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
101 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
102 <vscale x 2 x i1> %0,
103 <vscale x 2 x i8> %2,
104 <vscale x 2 x i8> %3,
105 <vscale x 2 x i1> %mask,
108 ret <vscale x 2 x i1> %a
111 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
116 define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
117 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
118 ; CHECK: # %bb.0: # %entry
119 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
120 ; CHECK-NEXT: vmslt.vv v0, v8, v9
123 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
124 <vscale x 4 x i8> %0,
125 <vscale x 4 x i8> %1,
128 ret <vscale x 4 x i1> %a
131 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
138 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
139 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
140 ; CHECK: # %bb.0: # %entry
141 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
142 ; CHECK-NEXT: vmslt.vv v8, v8, v9
143 ; CHECK-NEXT: vmv1r.v v11, v0
144 ; CHECK-NEXT: vmv1r.v v0, v8
145 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
146 ; CHECK-NEXT: vmv1r.v v0, v11
149 %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
150 <vscale x 4 x i8> %1,
151 <vscale x 4 x i8> %2,
153 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
154 <vscale x 4 x i1> %0,
155 <vscale x 4 x i8> %2,
156 <vscale x 4 x i8> %3,
157 <vscale x 4 x i1> %mask,
160 ret <vscale x 4 x i1> %a
163 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
168 define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
169 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
170 ; CHECK: # %bb.0: # %entry
171 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
172 ; CHECK-NEXT: vmslt.vv v0, v8, v9
175 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
176 <vscale x 8 x i8> %0,
177 <vscale x 8 x i8> %1,
180 ret <vscale x 8 x i1> %a
183 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
190 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
191 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
192 ; CHECK: # %bb.0: # %entry
193 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
194 ; CHECK-NEXT: vmslt.vv v8, v8, v9
195 ; CHECK-NEXT: vmv1r.v v11, v0
196 ; CHECK-NEXT: vmv.v.v v0, v8
197 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
198 ; CHECK-NEXT: vmv.v.v v0, v11
201 %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
202 <vscale x 8 x i8> %1,
203 <vscale x 8 x i8> %2,
205 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
206 <vscale x 8 x i1> %0,
207 <vscale x 8 x i8> %2,
208 <vscale x 8 x i8> %3,
209 <vscale x 8 x i1> %mask,
212 ret <vscale x 8 x i1> %a
215 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
220 define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
221 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
224 ; CHECK-NEXT: vmslt.vv v0, v8, v10
227 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
232 ret <vscale x 16 x i1> %a
235 declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
242 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
243 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
244 ; CHECK: # %bb.0: # %entry
245 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
246 ; CHECK-NEXT: vmslt.vv v14, v8, v10
247 ; CHECK-NEXT: vmv1r.v v8, v0
248 ; CHECK-NEXT: vmv1r.v v0, v14
249 ; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
250 ; CHECK-NEXT: vmv1r.v v0, v8
253 %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
254 <vscale x 16 x i8> %1,
255 <vscale x 16 x i8> %2,
257 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
258 <vscale x 16 x i1> %0,
259 <vscale x 16 x i8> %2,
260 <vscale x 16 x i8> %3,
261 <vscale x 16 x i1> %mask,
264 ret <vscale x 16 x i1> %a
267 declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
272 define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
273 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
276 ; CHECK-NEXT: vmslt.vv v0, v8, v12
279 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
280 <vscale x 32 x i8> %0,
281 <vscale x 32 x i8> %1,
284 ret <vscale x 32 x i1> %a
287 declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
294 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
295 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
298 ; CHECK-NEXT: vmslt.vv v20, v8, v12
299 ; CHECK-NEXT: vmv1r.v v8, v0
300 ; CHECK-NEXT: vmv1r.v v0, v20
301 ; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
302 ; CHECK-NEXT: vmv1r.v v0, v8
305 %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
306 <vscale x 32 x i8> %1,
307 <vscale x 32 x i8> %2,
309 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
310 <vscale x 32 x i1> %0,
311 <vscale x 32 x i8> %2,
312 <vscale x 32 x i8> %3,
313 <vscale x 32 x i1> %mask,
316 ret <vscale x 32 x i1> %a
319 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
324 define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
325 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
326 ; CHECK: # %bb.0: # %entry
327 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
328 ; CHECK-NEXT: vmslt.vv v0, v8, v9
331 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
332 <vscale x 1 x i16> %0,
333 <vscale x 1 x i16> %1,
336 ret <vscale x 1 x i1> %a
339 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
346 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
347 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
348 ; CHECK: # %bb.0: # %entry
349 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
350 ; CHECK-NEXT: vmslt.vv v8, v8, v9
351 ; CHECK-NEXT: vmv1r.v v11, v0
352 ; CHECK-NEXT: vmv1r.v v0, v8
353 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
354 ; CHECK-NEXT: vmv1r.v v0, v11
357 %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
358 <vscale x 1 x i16> %1,
359 <vscale x 1 x i16> %2,
361 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
362 <vscale x 1 x i1> %0,
363 <vscale x 1 x i16> %2,
364 <vscale x 1 x i16> %3,
365 <vscale x 1 x i1> %mask,
368 ret <vscale x 1 x i1> %a
371 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
376 define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
377 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
378 ; CHECK: # %bb.0: # %entry
379 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
380 ; CHECK-NEXT: vmslt.vv v0, v8, v9
383 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
384 <vscale x 2 x i16> %0,
385 <vscale x 2 x i16> %1,
388 ret <vscale x 2 x i1> %a
391 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
398 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
399 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
400 ; CHECK: # %bb.0: # %entry
401 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
402 ; CHECK-NEXT: vmslt.vv v8, v8, v9
403 ; CHECK-NEXT: vmv1r.v v11, v0
404 ; CHECK-NEXT: vmv1r.v v0, v8
405 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
406 ; CHECK-NEXT: vmv1r.v v0, v11
409 %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
410 <vscale x 2 x i16> %1,
411 <vscale x 2 x i16> %2,
413 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
414 <vscale x 2 x i1> %0,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i16> %3,
417 <vscale x 2 x i1> %mask,
420 ret <vscale x 2 x i1> %a
423 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
428 define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vmslt.vv v0, v8, v9
435 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
436 <vscale x 4 x i16> %0,
437 <vscale x 4 x i16> %1,
440 ret <vscale x 4 x i1> %a
443 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
450 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
451 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
452 ; CHECK: # %bb.0: # %entry
453 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
454 ; CHECK-NEXT: vmslt.vv v8, v8, v9
455 ; CHECK-NEXT: vmv1r.v v11, v0
456 ; CHECK-NEXT: vmv.v.v v0, v8
457 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
458 ; CHECK-NEXT: vmv.v.v v0, v11
461 %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
462 <vscale x 4 x i16> %1,
463 <vscale x 4 x i16> %2,
465 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
466 <vscale x 4 x i1> %0,
467 <vscale x 4 x i16> %2,
468 <vscale x 4 x i16> %3,
469 <vscale x 4 x i1> %mask,
472 ret <vscale x 4 x i1> %a
475 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
480 define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
481 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
482 ; CHECK: # %bb.0: # %entry
483 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
484 ; CHECK-NEXT: vmslt.vv v0, v8, v10
487 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
488 <vscale x 8 x i16> %0,
489 <vscale x 8 x i16> %1,
492 ret <vscale x 8 x i1> %a
495 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
502 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
503 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
504 ; CHECK: # %bb.0: # %entry
505 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
506 ; CHECK-NEXT: vmslt.vv v14, v8, v10
507 ; CHECK-NEXT: vmv1r.v v8, v0
508 ; CHECK-NEXT: vmv1r.v v0, v14
509 ; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
510 ; CHECK-NEXT: vmv1r.v v0, v8
513 %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
514 <vscale x 8 x i16> %1,
515 <vscale x 8 x i16> %2,
517 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
518 <vscale x 8 x i1> %0,
519 <vscale x 8 x i16> %2,
520 <vscale x 8 x i16> %3,
521 <vscale x 8 x i1> %mask,
524 ret <vscale x 8 x i1> %a
527 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
532 define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
533 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
534 ; CHECK: # %bb.0: # %entry
535 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
536 ; CHECK-NEXT: vmslt.vv v0, v8, v12
539 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
540 <vscale x 16 x i16> %0,
541 <vscale x 16 x i16> %1,
544 ret <vscale x 16 x i1> %a
547 declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
554 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
555 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
556 ; CHECK: # %bb.0: # %entry
557 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
558 ; CHECK-NEXT: vmslt.vv v20, v8, v12
559 ; CHECK-NEXT: vmv1r.v v8, v0
560 ; CHECK-NEXT: vmv1r.v v0, v20
561 ; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
562 ; CHECK-NEXT: vmv1r.v v0, v8
565 %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
566 <vscale x 16 x i16> %1,
567 <vscale x 16 x i16> %2,
569 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
570 <vscale x 16 x i1> %0,
571 <vscale x 16 x i16> %2,
572 <vscale x 16 x i16> %3,
573 <vscale x 16 x i1> %mask,
576 ret <vscale x 16 x i1> %a
579 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
584 define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
585 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
586 ; CHECK: # %bb.0: # %entry
587 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
588 ; CHECK-NEXT: vmslt.vv v0, v8, v9
591 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
592 <vscale x 1 x i32> %0,
593 <vscale x 1 x i32> %1,
596 ret <vscale x 1 x i1> %a
599 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
606 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
607 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
608 ; CHECK: # %bb.0: # %entry
609 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
610 ; CHECK-NEXT: vmslt.vv v8, v8, v9
611 ; CHECK-NEXT: vmv1r.v v11, v0
612 ; CHECK-NEXT: vmv1r.v v0, v8
613 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
614 ; CHECK-NEXT: vmv1r.v v0, v11
617 %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
618 <vscale x 1 x i32> %1,
619 <vscale x 1 x i32> %2,
621 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
622 <vscale x 1 x i1> %0,
623 <vscale x 1 x i32> %2,
624 <vscale x 1 x i32> %3,
625 <vscale x 1 x i1> %mask,
628 ret <vscale x 1 x i1> %a
631 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
636 define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
637 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
640 ; CHECK-NEXT: vmslt.vv v0, v8, v9
643 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
644 <vscale x 2 x i32> %0,
645 <vscale x 2 x i32> %1,
648 ret <vscale x 2 x i1> %a
651 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
658 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
659 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
660 ; CHECK: # %bb.0: # %entry
661 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
662 ; CHECK-NEXT: vmslt.vv v8, v8, v9
663 ; CHECK-NEXT: vmv1r.v v11, v0
664 ; CHECK-NEXT: vmv.v.v v0, v8
665 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
666 ; CHECK-NEXT: vmv.v.v v0, v11
669 %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
670 <vscale x 2 x i32> %1,
671 <vscale x 2 x i32> %2,
673 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
674 <vscale x 2 x i1> %0,
675 <vscale x 2 x i32> %2,
676 <vscale x 2 x i32> %3,
677 <vscale x 2 x i1> %mask,
680 ret <vscale x 2 x i1> %a
683 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
688 define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
689 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
690 ; CHECK: # %bb.0: # %entry
691 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
692 ; CHECK-NEXT: vmslt.vv v0, v8, v10
695 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
696 <vscale x 4 x i32> %0,
697 <vscale x 4 x i32> %1,
700 ret <vscale x 4 x i1> %a
703 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
710 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
711 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
712 ; CHECK: # %bb.0: # %entry
713 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
714 ; CHECK-NEXT: vmslt.vv v14, v8, v10
715 ; CHECK-NEXT: vmv1r.v v8, v0
716 ; CHECK-NEXT: vmv1r.v v0, v14
717 ; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
718 ; CHECK-NEXT: vmv1r.v v0, v8
721 %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
722 <vscale x 4 x i32> %1,
723 <vscale x 4 x i32> %2,
725 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
726 <vscale x 4 x i1> %0,
727 <vscale x 4 x i32> %2,
728 <vscale x 4 x i32> %3,
729 <vscale x 4 x i1> %mask,
732 ret <vscale x 4 x i1> %a
735 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
740 define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
741 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
742 ; CHECK: # %bb.0: # %entry
743 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
744 ; CHECK-NEXT: vmslt.vv v0, v8, v12
747 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
748 <vscale x 8 x i32> %0,
749 <vscale x 8 x i32> %1,
752 ret <vscale x 8 x i1> %a
755 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
762 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
763 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
764 ; CHECK: # %bb.0: # %entry
765 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
766 ; CHECK-NEXT: vmslt.vv v20, v8, v12
767 ; CHECK-NEXT: vmv1r.v v8, v0
768 ; CHECK-NEXT: vmv1r.v v0, v20
769 ; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
770 ; CHECK-NEXT: vmv1r.v v0, v8
773 %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
774 <vscale x 8 x i32> %1,
775 <vscale x 8 x i32> %2,
777 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
778 <vscale x 8 x i1> %0,
779 <vscale x 8 x i32> %2,
780 <vscale x 8 x i32> %3,
781 <vscale x 8 x i1> %mask,
784 ret <vscale x 8 x i1> %a
787 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
792 define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
793 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
794 ; CHECK: # %bb.0: # %entry
795 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
796 ; CHECK-NEXT: vmslt.vv v0, v8, v9
799 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
800 <vscale x 1 x i64> %0,
801 <vscale x 1 x i64> %1,
804 ret <vscale x 1 x i1> %a
807 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
814 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
815 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
816 ; CHECK: # %bb.0: # %entry
817 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
818 ; CHECK-NEXT: vmslt.vv v8, v8, v9
819 ; CHECK-NEXT: vmv1r.v v11, v0
820 ; CHECK-NEXT: vmv.v.v v0, v8
821 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
822 ; CHECK-NEXT: vmv.v.v v0, v11
825 %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
826 <vscale x 1 x i64> %1,
827 <vscale x 1 x i64> %2,
829 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
830 <vscale x 1 x i1> %0,
831 <vscale x 1 x i64> %2,
832 <vscale x 1 x i64> %3,
833 <vscale x 1 x i1> %mask,
836 ret <vscale x 1 x i1> %a
839 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
844 define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
848 ; CHECK-NEXT: vmslt.vv v0, v8, v10
851 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
852 <vscale x 2 x i64> %0,
853 <vscale x 2 x i64> %1,
856 ret <vscale x 2 x i1> %a
859 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
866 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
867 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
868 ; CHECK: # %bb.0: # %entry
869 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
870 ; CHECK-NEXT: vmslt.vv v14, v8, v10
871 ; CHECK-NEXT: vmv1r.v v8, v0
872 ; CHECK-NEXT: vmv1r.v v0, v14
873 ; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
874 ; CHECK-NEXT: vmv1r.v v0, v8
877 %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
878 <vscale x 2 x i64> %1,
879 <vscale x 2 x i64> %2,
881 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
882 <vscale x 2 x i1> %0,
883 <vscale x 2 x i64> %2,
884 <vscale x 2 x i64> %3,
885 <vscale x 2 x i1> %mask,
888 ret <vscale x 2 x i1> %a
891 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
896 define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
897 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
898 ; CHECK: # %bb.0: # %entry
899 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
900 ; CHECK-NEXT: vmslt.vv v0, v8, v12
903 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
904 <vscale x 4 x i64> %0,
905 <vscale x 4 x i64> %1,
908 ret <vscale x 4 x i1> %a
911 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
918 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
919 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
920 ; CHECK: # %bb.0: # %entry
921 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
922 ; CHECK-NEXT: vmslt.vv v20, v8, v12
923 ; CHECK-NEXT: vmv1r.v v8, v0
924 ; CHECK-NEXT: vmv1r.v v0, v20
925 ; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
926 ; CHECK-NEXT: vmv1r.v v0, v8
929 %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
930 <vscale x 4 x i64> %1,
931 <vscale x 4 x i64> %2,
933 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
934 <vscale x 4 x i1> %0,
935 <vscale x 4 x i64> %2,
936 <vscale x 4 x i64> %3,
937 <vscale x 4 x i1> %mask,
940 ret <vscale x 4 x i1> %a
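; The vector-scalar (vx) tests below pass the scalar operand in a0, so the
; VL argument moves to a1 in the expected vsetvli sequences.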
943 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
948 define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
949 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
952 ; CHECK-NEXT: vmslt.vx v0, v8, a0
955 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
956 <vscale x 1 x i8> %0,
960 ret <vscale x 1 x i1> %a
963 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
970 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
971 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
972 ; CHECK: # %bb.0: # %entry
973 ; CHECK-NEXT: vmv1r.v v10, v0
974 ; CHECK-NEXT: vmv1r.v v0, v9
975 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
976 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
977 ; CHECK-NEXT: vmv1r.v v0, v10
980 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
981 <vscale x 1 x i1> %0,
982 <vscale x 1 x i8> %1,
984 <vscale x 1 x i1> %3,
987 ret <vscale x 1 x i1> %a
990 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
995 define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
996 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
997 ; CHECK: # %bb.0: # %entry
998 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
999 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1002 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
1003 <vscale x 2 x i8> %0,
1007 ret <vscale x 2 x i1> %a
1010 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
1017 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1018 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
1019 ; CHECK: # %bb.0: # %entry
1020 ; CHECK-NEXT: vmv1r.v v10, v0
1021 ; CHECK-NEXT: vmv1r.v v0, v9
1022 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1023 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1024 ; CHECK-NEXT: vmv1r.v v0, v10
1027 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
1028 <vscale x 2 x i1> %0,
1029 <vscale x 2 x i8> %1,
1031 <vscale x 2 x i1> %3,
1034 ret <vscale x 2 x i1> %a
1037 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
1042 define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1043 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
1044 ; CHECK: # %bb.0: # %entry
1045 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1046 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1049 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
1050 <vscale x 4 x i8> %0,
1054 ret <vscale x 4 x i1> %a
1057 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
1064 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1065 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
1066 ; CHECK: # %bb.0: # %entry
1067 ; CHECK-NEXT: vmv1r.v v10, v0
1068 ; CHECK-NEXT: vmv1r.v v0, v9
1069 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1070 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1071 ; CHECK-NEXT: vmv1r.v v0, v10
1074 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
1075 <vscale x 4 x i1> %0,
1076 <vscale x 4 x i8> %1,
1078 <vscale x 4 x i1> %3,
1081 ret <vscale x 4 x i1> %a
1084 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
1089 define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1090 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
1091 ; CHECK: # %bb.0: # %entry
1092 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1093 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1096 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
1097 <vscale x 8 x i8> %0,
1101 ret <vscale x 8 x i1> %a
1104 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
1111 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1112 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
1113 ; CHECK: # %bb.0: # %entry
1114 ; CHECK-NEXT: vmv1r.v v10, v0
1115 ; CHECK-NEXT: vmv1r.v v0, v9
1116 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1117 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1118 ; CHECK-NEXT: vmv.v.v v0, v10
1121 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
1122 <vscale x 8 x i1> %0,
1123 <vscale x 8 x i8> %1,
1125 <vscale x 8 x i1> %3,
1128 ret <vscale x 8 x i1> %a
1131 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
1136 define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1137 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
1138 ; CHECK: # %bb.0: # %entry
1139 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1140 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1143 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
1144 <vscale x 16 x i8> %0,
1148 ret <vscale x 16 x i1> %a
1151 declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
1158 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1159 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
1160 ; CHECK: # %bb.0: # %entry
1161 ; CHECK-NEXT: vmv1r.v v11, v0
1162 ; CHECK-NEXT: vmv1r.v v0, v10
1163 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1164 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1165 ; CHECK-NEXT: vmv1r.v v0, v11
1168 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
1169 <vscale x 16 x i1> %0,
1170 <vscale x 16 x i8> %1,
1172 <vscale x 16 x i1> %3,
1175 ret <vscale x 16 x i1> %a
1178 declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
1183 define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1184 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
1185 ; CHECK: # %bb.0: # %entry
1186 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1187 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1190 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
1191 <vscale x 32 x i8> %0,
1195 ret <vscale x 32 x i1> %a
1198 declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
1205 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1206 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
1207 ; CHECK: # %bb.0: # %entry
1208 ; CHECK-NEXT: vmv1r.v v13, v0
1209 ; CHECK-NEXT: vmv1r.v v0, v12
1210 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1211 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1212 ; CHECK-NEXT: vmv1r.v v0, v13
1215 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
1216 <vscale x 32 x i1> %0,
1217 <vscale x 32 x i8> %1,
1219 <vscale x 32 x i1> %3,
1222 ret <vscale x 32 x i1> %a
1225 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
1230 define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1231 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
1232 ; CHECK: # %bb.0: # %entry
1233 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1234 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1237 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
1238 <vscale x 1 x i16> %0,
1242 ret <vscale x 1 x i1> %a
1245 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
1252 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1253 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
1254 ; CHECK: # %bb.0: # %entry
1255 ; CHECK-NEXT: vmv1r.v v10, v0
1256 ; CHECK-NEXT: vmv1r.v v0, v9
1257 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1258 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1259 ; CHECK-NEXT: vmv1r.v v0, v10
1262 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
1263 <vscale x 1 x i1> %0,
1264 <vscale x 1 x i16> %1,
1266 <vscale x 1 x i1> %3,
1269 ret <vscale x 1 x i1> %a
1272 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
1277 define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1278 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
1279 ; CHECK: # %bb.0: # %entry
1280 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1281 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1284 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
1285 <vscale x 2 x i16> %0,
1289 ret <vscale x 2 x i1> %a
1292 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
1299 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1300 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
1301 ; CHECK: # %bb.0: # %entry
1302 ; CHECK-NEXT: vmv1r.v v10, v0
1303 ; CHECK-NEXT: vmv1r.v v0, v9
1304 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1305 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1306 ; CHECK-NEXT: vmv1r.v v0, v10
1309 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
1310 <vscale x 2 x i1> %0,
1311 <vscale x 2 x i16> %1,
1313 <vscale x 2 x i1> %3,
1316 ret <vscale x 2 x i1> %a
1319 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
1324 define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1325 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
1326 ; CHECK: # %bb.0: # %entry
1327 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1328 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1331 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
1332 <vscale x 4 x i16> %0,
1336 ret <vscale x 4 x i1> %a
1339 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
1346 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1347 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
1348 ; CHECK: # %bb.0: # %entry
1349 ; CHECK-NEXT: vmv1r.v v10, v0
1350 ; CHECK-NEXT: vmv1r.v v0, v9
1351 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1352 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1353 ; CHECK-NEXT: vmv.v.v v0, v10
1356 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
1357 <vscale x 4 x i1> %0,
1358 <vscale x 4 x i16> %1,
1360 <vscale x 4 x i1> %3,
1363 ret <vscale x 4 x i1> %a
1366 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
1371 define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1372 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
1373 ; CHECK: # %bb.0: # %entry
1374 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1375 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1378 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
1379 <vscale x 8 x i16> %0,
1383 ret <vscale x 8 x i1> %a
1386 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
1393 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1394 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
1395 ; CHECK: # %bb.0: # %entry
1396 ; CHECK-NEXT: vmv1r.v v11, v0
1397 ; CHECK-NEXT: vmv1r.v v0, v10
1398 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1399 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1400 ; CHECK-NEXT: vmv1r.v v0, v11
1403 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
1404 <vscale x 8 x i1> %0,
1405 <vscale x 8 x i16> %1,
1407 <vscale x 8 x i1> %3,
1410 ret <vscale x 8 x i1> %a
1413 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
1414 <vscale x 16 x i16>,
1418 define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1419 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
1420 ; CHECK: # %bb.0: # %entry
1421 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1422 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1425 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
1426 <vscale x 16 x i16> %0,
1430 ret <vscale x 16 x i1> %a
1433 declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
1435 <vscale x 16 x i16>,
1440 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1441 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
1442 ; CHECK: # %bb.0: # %entry
1443 ; CHECK-NEXT: vmv1r.v v13, v0
1444 ; CHECK-NEXT: vmv1r.v v0, v12
1445 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1446 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1447 ; CHECK-NEXT: vmv1r.v v0, v13
1450 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
1451 <vscale x 16 x i1> %0,
1452 <vscale x 16 x i16> %1,
1454 <vscale x 16 x i1> %3,
1457 ret <vscale x 16 x i1> %a
1460 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
1465 define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1466 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
1467 ; CHECK: # %bb.0: # %entry
1468 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1469 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1472 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
1473 <vscale x 1 x i32> %0,
1477 ret <vscale x 1 x i1> %a
1480 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
1487 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1488 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
1489 ; CHECK: # %bb.0: # %entry
1490 ; CHECK-NEXT: vmv1r.v v10, v0
1491 ; CHECK-NEXT: vmv1r.v v0, v9
1492 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1493 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1494 ; CHECK-NEXT: vmv1r.v v0, v10
1497 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
1498 <vscale x 1 x i1> %0,
1499 <vscale x 1 x i32> %1,
1501 <vscale x 1 x i1> %3,
1504 ret <vscale x 1 x i1> %a
1507 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
1512 define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1513 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
1514 ; CHECK: # %bb.0: # %entry
1515 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1516 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1519 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
1520 <vscale x 2 x i32> %0,
1524 ret <vscale x 2 x i1> %a
1527 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
1534 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1535 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
1536 ; CHECK: # %bb.0: # %entry
1537 ; CHECK-NEXT: vmv1r.v v10, v0
1538 ; CHECK-NEXT: vmv1r.v v0, v9
1539 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1540 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1541 ; CHECK-NEXT: vmv.v.v v0, v10
1544 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
1545 <vscale x 2 x i1> %0,
1546 <vscale x 2 x i32> %1,
1548 <vscale x 2 x i1> %3,
1551 ret <vscale x 2 x i1> %a
1554 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
1559 define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1560 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
1561 ; CHECK: # %bb.0: # %entry
1562 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1563 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1566 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
1567 <vscale x 4 x i32> %0,
1571 ret <vscale x 4 x i1> %a
1574 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
1581 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1582 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: vmv1r.v v11, v0
1585 ; CHECK-NEXT: vmv1r.v v0, v10
1586 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1587 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1588 ; CHECK-NEXT: vmv1r.v v0, v11
1591 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
1592 <vscale x 4 x i1> %0,
1593 <vscale x 4 x i32> %1,
1595 <vscale x 4 x i1> %3,
1598 ret <vscale x 4 x i1> %a
1601 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
1606 define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1607 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
1608 ; CHECK: # %bb.0: # %entry
1609 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1610 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1613 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
1614 <vscale x 8 x i32> %0,
1618 ret <vscale x 8 x i1> %a
1621 declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
1628 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1629 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
1630 ; CHECK: # %bb.0: # %entry
1631 ; CHECK-NEXT: vmv1r.v v13, v0
1632 ; CHECK-NEXT: vmv1r.v v0, v12
1633 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1634 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1635 ; CHECK-NEXT: vmv1r.v v0, v13
1638 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
1639 <vscale x 8 x i1> %0,
1640 <vscale x 8 x i32> %1,
1642 <vscale x 8 x i1> %3,
1645 ret <vscale x 8 x i1> %a
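; For the 64-bit scalar (vx i64) tests, RV32 cannot hold the i64 operand in
; a single GPR: the two halves are stored to the stack and broadcast with a
; zero-strided vlse64.v, turning the compare into vmslt.vv. RV64 keeps the
; plain vmslt.vx form, hence the separate RV32/RV64 prefixes from here on.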
1648 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
1653 define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1654 ; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
1655 ; RV32: # %bb.0: # %entry
1656 ; RV32-NEXT: addi sp, sp, -16
1657 ; RV32-NEXT: sw a1, 12(sp)
1658 ; RV32-NEXT: sw a0, 8(sp)
1659 ; RV32-NEXT: addi a0, sp, 8
1660 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1661 ; RV32-NEXT: vlse64.v v9, (a0), zero
1662 ; RV32-NEXT: vmslt.vv v0, v8, v9
1663 ; RV32-NEXT: addi sp, sp, 16
1666 ; RV64-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
1667 ; RV64: # %bb.0: # %entry
1668 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1669 ; RV64-NEXT: vmslt.vx v0, v8, a0
1672 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
1673 <vscale x 1 x i64> %0,
1677 ret <vscale x 1 x i1> %a
1680 declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
1687 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1688 ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
1689 ; RV32: # %bb.0: # %entry
1690 ; RV32-NEXT: addi sp, sp, -16
1691 ; RV32-NEXT: sw a1, 12(sp)
1692 ; RV32-NEXT: sw a0, 8(sp)
1693 ; RV32-NEXT: addi a0, sp, 8
1694 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1695 ; RV32-NEXT: vlse64.v v11, (a0), zero
1696 ; RV32-NEXT: vmv1r.v v10, v0
1697 ; RV32-NEXT: vmv1r.v v0, v9
1698 ; RV32-NEXT: vmslt.vv v10, v8, v11, v0.t
1699 ; RV32-NEXT: vmv.v.v v0, v10
1700 ; RV32-NEXT: addi sp, sp, 16
1703 ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
1704 ; RV64: # %bb.0: # %entry
1705 ; RV64-NEXT: vmv1r.v v10, v0
1706 ; RV64-NEXT: vmv1r.v v0, v9
1707 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1708 ; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
1709 ; RV64-NEXT: vmv.v.v v0, v10
1712 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
1713 <vscale x 1 x i1> %0,
1714 <vscale x 1 x i64> %1,
1716 <vscale x 1 x i1> %3,
1719 ret <vscale x 1 x i1> %a
1722 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
1727 define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1728 ; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
1729 ; RV32: # %bb.0: # %entry
1730 ; RV32-NEXT: addi sp, sp, -16
1731 ; RV32-NEXT: sw a1, 12(sp)
1732 ; RV32-NEXT: sw a0, 8(sp)
1733 ; RV32-NEXT: addi a0, sp, 8
1734 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1735 ; RV32-NEXT: vlse64.v v10, (a0), zero
1736 ; RV32-NEXT: vmslt.vv v0, v8, v10
1737 ; RV32-NEXT: addi sp, sp, 16
1740 ; RV64-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
1741 ; RV64: # %bb.0: # %entry
1742 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1743 ; RV64-NEXT: vmslt.vx v0, v8, a0
1746 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
1747 <vscale x 2 x i64> %0,
1751 ret <vscale x 2 x i1> %a
1754 declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
1761 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1762 ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
1763 ; RV32: # %bb.0: # %entry
1764 ; RV32-NEXT: addi sp, sp, -16
1765 ; RV32-NEXT: sw a1, 12(sp)
1766 ; RV32-NEXT: sw a0, 8(sp)
1767 ; RV32-NEXT: addi a0, sp, 8
1768 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1769 ; RV32-NEXT: vlse64.v v12, (a0), zero
1770 ; RV32-NEXT: vmv1r.v v11, v0
1771 ; RV32-NEXT: vmv1r.v v0, v10
1772 ; RV32-NEXT: vmslt.vv v11, v8, v12, v0.t
1773 ; RV32-NEXT: vmv1r.v v0, v11
1774 ; RV32-NEXT: addi sp, sp, 16
1777 ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
1778 ; RV64: # %bb.0: # %entry
1779 ; RV64-NEXT: vmv1r.v v11, v0
1780 ; RV64-NEXT: vmv1r.v v0, v10
1781 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1782 ; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
1783 ; RV64-NEXT: vmv1r.v v0, v11
1786 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
1787 <vscale x 2 x i1> %0,
1788 <vscale x 2 x i64> %1,
1790 <vscale x 2 x i1> %3,
1793 ret <vscale x 2 x i1> %a
1796 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
1801 define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1802 ; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
1803 ; RV32: # %bb.0: # %entry
1804 ; RV32-NEXT: addi sp, sp, -16
1805 ; RV32-NEXT: sw a1, 12(sp)
1806 ; RV32-NEXT: sw a0, 8(sp)
1807 ; RV32-NEXT: addi a0, sp, 8
1808 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1809 ; RV32-NEXT: vlse64.v v12, (a0), zero
1810 ; RV32-NEXT: vmslt.vv v0, v8, v12
1811 ; RV32-NEXT: addi sp, sp, 16
1814 ; RV64-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
1815 ; RV64: # %bb.0: # %entry
1816 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1817 ; RV64-NEXT: vmslt.vx v0, v8, a0
1820 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
1821 <vscale x 4 x i64> %0,
1825 ret <vscale x 4 x i1> %a
1828 declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
1835 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1836 ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
1837 ; RV32: # %bb.0: # %entry
1838 ; RV32-NEXT: addi sp, sp, -16
1839 ; RV32-NEXT: sw a1, 12(sp)
1840 ; RV32-NEXT: sw a0, 8(sp)
1841 ; RV32-NEXT: addi a0, sp, 8
1842 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
1843 ; RV32-NEXT: vlse64.v v16, (a0), zero
1844 ; RV32-NEXT: vmv1r.v v13, v0
1845 ; RV32-NEXT: vmv1r.v v0, v12
1846 ; RV32-NEXT: vmslt.vv v13, v8, v16, v0.t
1847 ; RV32-NEXT: vmv1r.v v0, v13
1848 ; RV32-NEXT: addi sp, sp, 16
1851 ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
1852 ; RV64: # %bb.0: # %entry
1853 ; RV64-NEXT: vmv1r.v v13, v0
1854 ; RV64-NEXT: vmv1r.v v0, v12
1855 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
1856 ; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
1857 ; RV64-NEXT: vmv1r.v v0, v13
1860 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
1861 <vscale x 4 x i1> %0,
1862 <vscale x 4 x i64> %1,
1864 <vscale x 4 x i1> %3,
1867 ret <vscale x 4 x i1> %a
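; vmslt has no .vi encoding, so the immediate tests below check that
; "x < imm" is emitted as vmsle.vi with imm-1, and that a compare against
; 0 uses vmslt.vx with the zero register.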
1870 define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
1871 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
1872 ; CHECK: # %bb.0: # %entry
1873 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1874 ; CHECK-NEXT: vmsle.vi v0, v8, -16
1877 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
1878 <vscale x 1 x i8> %0,
1882 ret <vscale x 1 x i1> %a
1885 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1886 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
1887 ; CHECK: # %bb.0: # %entry
1888 ; CHECK-NEXT: vmv1r.v v10, v0
1889 ; CHECK-NEXT: vmv1r.v v0, v9
1890 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
1891 ; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t
1892 ; CHECK-NEXT: vmv1r.v v0, v10
1895 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
1896 <vscale x 1 x i1> %0,
1897 <vscale x 1 x i8> %1,
1899 <vscale x 1 x i1> %2,
1902 ret <vscale x 1 x i1> %a
1905 define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
1906 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
1907 ; CHECK: # %bb.0: # %entry
1908 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1909 ; CHECK-NEXT: vmsle.vi v0, v8, -14
1912 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
1913 <vscale x 2 x i8> %0,
1917 ret <vscale x 2 x i1> %a
1920 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1921 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
1922 ; CHECK: # %bb.0: # %entry
1923 ; CHECK-NEXT: vmv1r.v v10, v0
1924 ; CHECK-NEXT: vmv1r.v v0, v9
1925 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
1926 ; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t
1927 ; CHECK-NEXT: vmv1r.v v0, v10
1930 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
1931 <vscale x 2 x i1> %0,
1932 <vscale x 2 x i8> %1,
1934 <vscale x 2 x i1> %2,
1937 ret <vscale x 2 x i1> %a
1940 define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
1941 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
1942 ; CHECK: # %bb.0: # %entry
1943 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1944 ; CHECK-NEXT: vmsle.vi v0, v8, -12
1947 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
1948 <vscale x 4 x i8> %0,
1952 ret <vscale x 4 x i1> %a
1955 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1956 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
1957 ; CHECK: # %bb.0: # %entry
1958 ; CHECK-NEXT: vmv1r.v v10, v0
1959 ; CHECK-NEXT: vmv1r.v v0, v9
1960 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
1961 ; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t
1962 ; CHECK-NEXT: vmv1r.v v0, v10
1965 %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
1966 <vscale x 4 x i1> %0,
1967 <vscale x 4 x i8> %1,
1969 <vscale x 4 x i1> %2,
1972 ret <vscale x 4 x i1> %a
1975 define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
1976 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
1977 ; CHECK: # %bb.0: # %entry
1978 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1979 ; CHECK-NEXT: vmsle.vi v0, v8, -10
1982 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
1983 <vscale x 8 x i8> %0,
1987 ret <vscale x 8 x i1> %a
1990 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1991 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
1992 ; CHECK: # %bb.0: # %entry
1993 ; CHECK-NEXT: vmv1r.v v10, v0
1994 ; CHECK-NEXT: vmv1r.v v0, v9
1995 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
1996 ; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t
1997 ; CHECK-NEXT: vmv.v.v v0, v10
2000 %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
2001 <vscale x 8 x i1> %0,
2002 <vscale x 8 x i8> %1,
2004 <vscale x 8 x i1> %2,
2007 ret <vscale x 8 x i1> %a
2010 define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2011 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
2012 ; CHECK: # %bb.0: # %entry
2013 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2014 ; CHECK-NEXT: vmsle.vi v0, v8, -8
2017 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
2018 <vscale x 16 x i8> %0,
2022 ret <vscale x 16 x i1> %a
2025 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2026 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
2027 ; CHECK: # %bb.0: # %entry
2028 ; CHECK-NEXT: vmv1r.v v11, v0
2029 ; CHECK-NEXT: vmv1r.v v0, v10
2030 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2031 ; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t
2032 ; CHECK-NEXT: vmv1r.v v0, v11
2035 %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
2036 <vscale x 16 x i1> %0,
2037 <vscale x 16 x i8> %1,
2039 <vscale x 16 x i1> %2,
2042 ret <vscale x 16 x i1> %a
2045 define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2046 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
2047 ; CHECK: # %bb.0: # %entry
2048 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2049 ; CHECK-NEXT: vmsle.vi v0, v8, -6
2052 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
2053 <vscale x 32 x i8> %0,
2057 ret <vscale x 32 x i1> %a
2060 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2061 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
2062 ; CHECK: # %bb.0: # %entry
2063 ; CHECK-NEXT: vmv1r.v v13, v0
2064 ; CHECK-NEXT: vmv1r.v v0, v12
2065 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2066 ; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t
2067 ; CHECK-NEXT: vmv1r.v v0, v13
2070 %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
2071 <vscale x 32 x i1> %0,
2072 <vscale x 32 x i8> %1,
2074 <vscale x 32 x i1> %2,
2077 ret <vscale x 32 x i1> %a
2080 define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
2081 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
2082 ; CHECK: # %bb.0: # %entry
2083 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2084 ; CHECK-NEXT: vmsle.vi v0, v8, -4
2087 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
2088 <vscale x 1 x i16> %0,
2092 ret <vscale x 1 x i1> %a
2095 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2096 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
2097 ; CHECK: # %bb.0: # %entry
2098 ; CHECK-NEXT: vmv1r.v v10, v0
2099 ; CHECK-NEXT: vmv1r.v v0, v9
2100 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
2101 ; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t
2102 ; CHECK-NEXT: vmv1r.v v0, v10
2105 %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
2106 <vscale x 1 x i1> %0,
2107 <vscale x 1 x i16> %1,
2109 <vscale x 1 x i1> %2,
2112 ret <vscale x 1 x i1> %a
2115 define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
2116 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
2117 ; CHECK: # %bb.0: # %entry
2118 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2119 ; CHECK-NEXT: vmsle.vi v0, v8, -2
2122 %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
2123 <vscale x 2 x i16> %0,
2127 ret <vscale x 2 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 0,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, zero
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 0,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 2,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 3,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 4,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 5,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 5
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 6,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 7,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 7
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 8,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 10,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 11,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 12,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 13,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
