; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
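
; This file exercises the llvm.riscv.vmsge.* ("set if greater than or equal",
; signed) comparison intrinsics.  There is no vmsge.vv machine instruction, so
; the vector-vector form is expected to be lowered to vmsle.vv with the two
; source operands swapped, as the CHECK lines below verify.
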
7 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
12 define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
13 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
14 ; CHECK: # %bb.0: # %entry
15 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
16 ; CHECK-NEXT: vmsle.vv v0, v9, v8
19 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
24 ret <vscale x 1 x i1> %a
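
; For the masked vector-vector tests below, the IR first materialises a mask
; with an unmasked vmsge (again emitted as vmsle.vv with swapped operands) and
; then feeds it to the masked intrinsic, which is lowered to a masked vmsle.vv
; writing into a copy of the maskedoff operand before the result is moved back
; into v0.
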
27 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
34 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
35 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
36 ; CHECK: # %bb.0: # %entry
37 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
38 ; CHECK-NEXT: vmsle.vv v8, v9, v8
39 ; CHECK-NEXT: vmv1r.v v11, v0
40 ; CHECK-NEXT: vmv1r.v v0, v8
41 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
42 ; CHECK-NEXT: vmv1r.v v0, v11
45 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
49 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
53 <vscale x 1 x i1> %mask,
56 ret <vscale x 1 x i1> %a
59 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
64 define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
65 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
66 ; CHECK: # %bb.0: # %entry
67 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
68 ; CHECK-NEXT: vmsle.vv v0, v9, v8
71 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
76 ret <vscale x 2 x i1> %a
79 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
86 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
87 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
88 ; CHECK: # %bb.0: # %entry
89 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
90 ; CHECK-NEXT: vmsle.vv v8, v9, v8
91 ; CHECK-NEXT: vmv1r.v v11, v0
92 ; CHECK-NEXT: vmv1r.v v0, v8
93 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
94 ; CHECK-NEXT: vmv1r.v v0, v11
97 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
101 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
102 <vscale x 2 x i1> %0,
103 <vscale x 2 x i8> %2,
104 <vscale x 2 x i8> %3,
105 <vscale x 2 x i1> %mask,
108 ret <vscale x 2 x i1> %a
111 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
116 define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
117 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
118 ; CHECK: # %bb.0: # %entry
119 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
120 ; CHECK-NEXT: vmsle.vv v0, v9, v8
123 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
124 <vscale x 4 x i8> %0,
125 <vscale x 4 x i8> %1,
128 ret <vscale x 4 x i1> %a
131 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
138 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
139 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
140 ; CHECK: # %bb.0: # %entry
141 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
142 ; CHECK-NEXT: vmsle.vv v8, v9, v8
143 ; CHECK-NEXT: vmv1r.v v11, v0
144 ; CHECK-NEXT: vmv1r.v v0, v8
145 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
146 ; CHECK-NEXT: vmv1r.v v0, v11
149 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
150 <vscale x 4 x i8> %1,
151 <vscale x 4 x i8> %2,
153 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
154 <vscale x 4 x i1> %0,
155 <vscale x 4 x i8> %2,
156 <vscale x 4 x i8> %3,
157 <vscale x 4 x i1> %mask,
160 ret <vscale x 4 x i1> %a
163 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
168 define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
169 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
170 ; CHECK: # %bb.0: # %entry
171 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
172 ; CHECK-NEXT: vmsle.vv v0, v9, v8
175 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
176 <vscale x 8 x i8> %0,
177 <vscale x 8 x i8> %1,
180 ret <vscale x 8 x i1> %a
183 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
190 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
191 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
192 ; CHECK: # %bb.0: # %entry
193 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
194 ; CHECK-NEXT: vmsle.vv v8, v9, v8
195 ; CHECK-NEXT: vmv1r.v v11, v0
196 ; CHECK-NEXT: vmv.v.v v0, v8
197 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
198 ; CHECK-NEXT: vmv.v.v v0, v11
201 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
202 <vscale x 8 x i8> %1,
203 <vscale x 8 x i8> %2,
205 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
206 <vscale x 8 x i1> %0,
207 <vscale x 8 x i8> %2,
208 <vscale x 8 x i8> %3,
209 <vscale x 8 x i1> %mask,
212 ret <vscale x 8 x i1> %a
215 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
220 define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
221 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
224 ; CHECK-NEXT: vmsle.vv v0, v10, v8
227 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
232 ret <vscale x 16 x i1> %a
235 declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
242 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
243 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
244 ; CHECK: # %bb.0: # %entry
245 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
246 ; CHECK-NEXT: vmsle.vv v14, v10, v8
247 ; CHECK-NEXT: vmv1r.v v8, v0
248 ; CHECK-NEXT: vmv1r.v v0, v14
249 ; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
250 ; CHECK-NEXT: vmv1r.v v0, v8
253 %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
254 <vscale x 16 x i8> %1,
255 <vscale x 16 x i8> %2,
257 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
258 <vscale x 16 x i1> %0,
259 <vscale x 16 x i8> %2,
260 <vscale x 16 x i8> %3,
261 <vscale x 16 x i1> %mask,
264 ret <vscale x 16 x i1> %a
267 declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
272 define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
273 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
276 ; CHECK-NEXT: vmsle.vv v0, v12, v8
279 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
280 <vscale x 32 x i8> %0,
281 <vscale x 32 x i8> %1,
284 ret <vscale x 32 x i1> %a
287 declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
294 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
295 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
298 ; CHECK-NEXT: vmsle.vv v20, v12, v8
299 ; CHECK-NEXT: vmv1r.v v8, v0
300 ; CHECK-NEXT: vmv1r.v v0, v20
301 ; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
302 ; CHECK-NEXT: vmv1r.v v0, v8
305 %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
306 <vscale x 32 x i8> %1,
307 <vscale x 32 x i8> %2,
309 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
310 <vscale x 32 x i1> %0,
311 <vscale x 32 x i8> %2,
312 <vscale x 32 x i8> %3,
313 <vscale x 32 x i1> %mask,
316 ret <vscale x 32 x i1> %a
319 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
324 define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
325 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
326 ; CHECK: # %bb.0: # %entry
327 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
328 ; CHECK-NEXT: vmsle.vv v0, v9, v8
331 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
332 <vscale x 1 x i16> %0,
333 <vscale x 1 x i16> %1,
336 ret <vscale x 1 x i1> %a
339 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
346 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
347 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
348 ; CHECK: # %bb.0: # %entry
349 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
350 ; CHECK-NEXT: vmsle.vv v8, v9, v8
351 ; CHECK-NEXT: vmv1r.v v11, v0
352 ; CHECK-NEXT: vmv1r.v v0, v8
353 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
354 ; CHECK-NEXT: vmv1r.v v0, v11
357 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
358 <vscale x 1 x i16> %1,
359 <vscale x 1 x i16> %2,
361 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
362 <vscale x 1 x i1> %0,
363 <vscale x 1 x i16> %2,
364 <vscale x 1 x i16> %3,
365 <vscale x 1 x i1> %mask,
368 ret <vscale x 1 x i1> %a
371 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
376 define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
377 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
378 ; CHECK: # %bb.0: # %entry
379 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
380 ; CHECK-NEXT: vmsle.vv v0, v9, v8
383 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
384 <vscale x 2 x i16> %0,
385 <vscale x 2 x i16> %1,
388 ret <vscale x 2 x i1> %a
391 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
398 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
399 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
400 ; CHECK: # %bb.0: # %entry
401 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
402 ; CHECK-NEXT: vmsle.vv v8, v9, v8
403 ; CHECK-NEXT: vmv1r.v v11, v0
404 ; CHECK-NEXT: vmv1r.v v0, v8
405 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
406 ; CHECK-NEXT: vmv1r.v v0, v11
409 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
410 <vscale x 2 x i16> %1,
411 <vscale x 2 x i16> %2,
413 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
414 <vscale x 2 x i1> %0,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i16> %3,
417 <vscale x 2 x i1> %mask,
420 ret <vscale x 2 x i1> %a
423 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
428 define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vmsle.vv v0, v9, v8
435 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
436 <vscale x 4 x i16> %0,
437 <vscale x 4 x i16> %1,
440 ret <vscale x 4 x i1> %a
443 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
450 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
451 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
452 ; CHECK: # %bb.0: # %entry
453 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
454 ; CHECK-NEXT: vmsle.vv v8, v9, v8
455 ; CHECK-NEXT: vmv1r.v v11, v0
456 ; CHECK-NEXT: vmv.v.v v0, v8
457 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
458 ; CHECK-NEXT: vmv.v.v v0, v11
461 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
462 <vscale x 4 x i16> %1,
463 <vscale x 4 x i16> %2,
465 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
466 <vscale x 4 x i1> %0,
467 <vscale x 4 x i16> %2,
468 <vscale x 4 x i16> %3,
469 <vscale x 4 x i1> %mask,
472 ret <vscale x 4 x i1> %a
475 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
480 define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
481 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
482 ; CHECK: # %bb.0: # %entry
483 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
484 ; CHECK-NEXT: vmsle.vv v0, v10, v8
487 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
488 <vscale x 8 x i16> %0,
489 <vscale x 8 x i16> %1,
492 ret <vscale x 8 x i1> %a
495 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
502 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
503 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
504 ; CHECK: # %bb.0: # %entry
505 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
506 ; CHECK-NEXT: vmsle.vv v14, v10, v8
507 ; CHECK-NEXT: vmv1r.v v8, v0
508 ; CHECK-NEXT: vmv1r.v v0, v14
509 ; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
510 ; CHECK-NEXT: vmv1r.v v0, v8
513 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
514 <vscale x 8 x i16> %1,
515 <vscale x 8 x i16> %2,
517 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
518 <vscale x 8 x i1> %0,
519 <vscale x 8 x i16> %2,
520 <vscale x 8 x i16> %3,
521 <vscale x 8 x i1> %mask,
524 ret <vscale x 8 x i1> %a
527 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
532 define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
533 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
534 ; CHECK: # %bb.0: # %entry
535 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
536 ; CHECK-NEXT: vmsle.vv v0, v12, v8
539 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
540 <vscale x 16 x i16> %0,
541 <vscale x 16 x i16> %1,
544 ret <vscale x 16 x i1> %a
547 declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
554 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
555 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
556 ; CHECK: # %bb.0: # %entry
557 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
558 ; CHECK-NEXT: vmsle.vv v20, v12, v8
559 ; CHECK-NEXT: vmv1r.v v8, v0
560 ; CHECK-NEXT: vmv1r.v v0, v20
561 ; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
562 ; CHECK-NEXT: vmv1r.v v0, v8
565 %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
566 <vscale x 16 x i16> %1,
567 <vscale x 16 x i16> %2,
569 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
570 <vscale x 16 x i1> %0,
571 <vscale x 16 x i16> %2,
572 <vscale x 16 x i16> %3,
573 <vscale x 16 x i1> %mask,
576 ret <vscale x 16 x i1> %a
579 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
584 define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
585 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
586 ; CHECK: # %bb.0: # %entry
587 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
588 ; CHECK-NEXT: vmsle.vv v0, v9, v8
591 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
592 <vscale x 1 x i32> %0,
593 <vscale x 1 x i32> %1,
596 ret <vscale x 1 x i1> %a
599 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
606 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
607 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
608 ; CHECK: # %bb.0: # %entry
609 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
610 ; CHECK-NEXT: vmsle.vv v8, v9, v8
611 ; CHECK-NEXT: vmv1r.v v11, v0
612 ; CHECK-NEXT: vmv1r.v v0, v8
613 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
614 ; CHECK-NEXT: vmv1r.v v0, v11
617 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
618 <vscale x 1 x i32> %1,
619 <vscale x 1 x i32> %2,
621 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
622 <vscale x 1 x i1> %0,
623 <vscale x 1 x i32> %2,
624 <vscale x 1 x i32> %3,
625 <vscale x 1 x i1> %mask,
628 ret <vscale x 1 x i1> %a
631 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
636 define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
637 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
640 ; CHECK-NEXT: vmsle.vv v0, v9, v8
643 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
644 <vscale x 2 x i32> %0,
645 <vscale x 2 x i32> %1,
648 ret <vscale x 2 x i1> %a
651 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
658 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
659 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
660 ; CHECK: # %bb.0: # %entry
661 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
662 ; CHECK-NEXT: vmsle.vv v8, v9, v8
663 ; CHECK-NEXT: vmv1r.v v11, v0
664 ; CHECK-NEXT: vmv.v.v v0, v8
665 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
666 ; CHECK-NEXT: vmv.v.v v0, v11
669 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
670 <vscale x 2 x i32> %1,
671 <vscale x 2 x i32> %2,
673 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
674 <vscale x 2 x i1> %0,
675 <vscale x 2 x i32> %2,
676 <vscale x 2 x i32> %3,
677 <vscale x 2 x i1> %mask,
680 ret <vscale x 2 x i1> %a
683 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
688 define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
689 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
690 ; CHECK: # %bb.0: # %entry
691 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
692 ; CHECK-NEXT: vmsle.vv v0, v10, v8
695 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
696 <vscale x 4 x i32> %0,
697 <vscale x 4 x i32> %1,
700 ret <vscale x 4 x i1> %a
703 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
710 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
711 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
712 ; CHECK: # %bb.0: # %entry
713 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
714 ; CHECK-NEXT: vmsle.vv v14, v10, v8
715 ; CHECK-NEXT: vmv1r.v v8, v0
716 ; CHECK-NEXT: vmv1r.v v0, v14
717 ; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
718 ; CHECK-NEXT: vmv1r.v v0, v8
721 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
722 <vscale x 4 x i32> %1,
723 <vscale x 4 x i32> %2,
725 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
726 <vscale x 4 x i1> %0,
727 <vscale x 4 x i32> %2,
728 <vscale x 4 x i32> %3,
729 <vscale x 4 x i1> %mask,
732 ret <vscale x 4 x i1> %a
735 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
740 define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
741 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
742 ; CHECK: # %bb.0: # %entry
743 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
744 ; CHECK-NEXT: vmsle.vv v0, v12, v8
747 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
748 <vscale x 8 x i32> %0,
749 <vscale x 8 x i32> %1,
752 ret <vscale x 8 x i1> %a
755 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
762 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
763 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
764 ; CHECK: # %bb.0: # %entry
765 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
766 ; CHECK-NEXT: vmsle.vv v20, v12, v8
767 ; CHECK-NEXT: vmv1r.v v8, v0
768 ; CHECK-NEXT: vmv1r.v v0, v20
769 ; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
770 ; CHECK-NEXT: vmv1r.v v0, v8
773 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
774 <vscale x 8 x i32> %1,
775 <vscale x 8 x i32> %2,
777 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
778 <vscale x 8 x i1> %0,
779 <vscale x 8 x i32> %2,
780 <vscale x 8 x i32> %3,
781 <vscale x 8 x i1> %mask,
784 ret <vscale x 8 x i1> %a
787 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
792 define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
793 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
794 ; CHECK: # %bb.0: # %entry
795 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
796 ; CHECK-NEXT: vmsle.vv v0, v9, v8
799 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
800 <vscale x 1 x i64> %0,
801 <vscale x 1 x i64> %1,
804 ret <vscale x 1 x i1> %a
807 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
814 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
815 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
816 ; CHECK: # %bb.0: # %entry
817 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
818 ; CHECK-NEXT: vmsle.vv v8, v9, v8
819 ; CHECK-NEXT: vmv1r.v v11, v0
820 ; CHECK-NEXT: vmv.v.v v0, v8
821 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
822 ; CHECK-NEXT: vmv.v.v v0, v11
825 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
826 <vscale x 1 x i64> %1,
827 <vscale x 1 x i64> %2,
829 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
830 <vscale x 1 x i1> %0,
831 <vscale x 1 x i64> %2,
832 <vscale x 1 x i64> %3,
833 <vscale x 1 x i1> %mask,
836 ret <vscale x 1 x i1> %a
839 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
844 define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
848 ; CHECK-NEXT: vmsle.vv v0, v10, v8
851 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
852 <vscale x 2 x i64> %0,
853 <vscale x 2 x i64> %1,
856 ret <vscale x 2 x i1> %a
859 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
866 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
867 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
868 ; CHECK: # %bb.0: # %entry
869 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
870 ; CHECK-NEXT: vmsle.vv v14, v10, v8
871 ; CHECK-NEXT: vmv1r.v v8, v0
872 ; CHECK-NEXT: vmv1r.v v0, v14
873 ; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
874 ; CHECK-NEXT: vmv1r.v v0, v8
877 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
878 <vscale x 2 x i64> %1,
879 <vscale x 2 x i64> %2,
881 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
882 <vscale x 2 x i1> %0,
883 <vscale x 2 x i64> %2,
884 <vscale x 2 x i64> %3,
885 <vscale x 2 x i1> %mask,
888 ret <vscale x 2 x i1> %a
891 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
896 define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
897 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
898 ; CHECK: # %bb.0: # %entry
899 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
900 ; CHECK-NEXT: vmsle.vv v0, v12, v8
903 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
904 <vscale x 4 x i64> %0,
905 <vscale x 4 x i64> %1,
908 ret <vscale x 4 x i1> %a
911 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
918 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
919 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
920 ; CHECK: # %bb.0: # %entry
921 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
922 ; CHECK-NEXT: vmsle.vv v20, v12, v8
923 ; CHECK-NEXT: vmv1r.v v8, v0
924 ; CHECK-NEXT: vmv1r.v v0, v20
925 ; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
926 ; CHECK-NEXT: vmv1r.v v0, v8
929 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
930 <vscale x 4 x i64> %1,
931 <vscale x 4 x i64> %2,
933 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
934 <vscale x 4 x i1> %0,
935 <vscale x 4 x i64> %2,
936 <vscale x 4 x i64> %3,
937 <vscale x 4 x i1> %mask,
940 ret <vscale x 4 x i1> %a
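
; The scalar (vx) form has no vmsge.vx instruction either: in the unmasked
; case it is lowered to vmslt.vx followed by vmnot.m to invert the result.
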
943 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
948 define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
949 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
952 ; CHECK-NEXT: vmslt.vx v8, v8, a0
953 ; CHECK-NEXT: vmnot.m v0, v8
956 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
957 <vscale x 1 x i8> %0,
961 ret <vscale x 1 x i1> %a
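
; In the masked vx case the compare is emitted as a masked vmslt.vx into a
; copy of the maskedoff operand, followed by vmxor.mm with the mask: active
; lanes are inverted (turning vmslt into vmsge), while inactive lanes keep the
; maskedoff value because xor with a zero mask bit is a no-op.
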
964 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
971 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
972 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
973 ; CHECK: # %bb.0: # %entry
974 ; CHECK-NEXT: vmv1r.v v10, v0
975 ; CHECK-NEXT: vmv1r.v v0, v9
976 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
977 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
978 ; CHECK-NEXT: vmxor.mm v0, v10, v9
981 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
982 <vscale x 1 x i1> %0,
983 <vscale x 1 x i8> %1,
985 <vscale x 1 x i1> %3,
988 ret <vscale x 1 x i1> %a
991 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
996 define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
997 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
998 ; CHECK: # %bb.0: # %entry
999 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1000 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1001 ; CHECK-NEXT: vmnot.m v0, v8
1004 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
1005 <vscale x 2 x i8> %0,
1009 ret <vscale x 2 x i1> %a
1012 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1019 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1020 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
1021 ; CHECK: # %bb.0: # %entry
1022 ; CHECK-NEXT: vmv1r.v v10, v0
1023 ; CHECK-NEXT: vmv1r.v v0, v9
1024 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1025 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1026 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1029 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1030 <vscale x 2 x i1> %0,
1031 <vscale x 2 x i8> %1,
1033 <vscale x 2 x i1> %3,
1036 ret <vscale x 2 x i1> %a
1039 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1044 define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1045 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
1046 ; CHECK: # %bb.0: # %entry
1047 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1048 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1049 ; CHECK-NEXT: vmnot.m v0, v8
1052 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1053 <vscale x 4 x i8> %0,
1057 ret <vscale x 4 x i1> %a
1060 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1067 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1068 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
1069 ; CHECK: # %bb.0: # %entry
1070 ; CHECK-NEXT: vmv1r.v v10, v0
1071 ; CHECK-NEXT: vmv1r.v v0, v9
1072 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1073 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1074 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1077 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1078 <vscale x 4 x i1> %0,
1079 <vscale x 4 x i8> %1,
1081 <vscale x 4 x i1> %3,
1084 ret <vscale x 4 x i1> %a
1087 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1092 define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1093 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
1094 ; CHECK: # %bb.0: # %entry
1095 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1096 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1097 ; CHECK-NEXT: vmnot.m v0, v8
1100 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1101 <vscale x 8 x i8> %0,
1105 ret <vscale x 8 x i1> %a
1108 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1115 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1116 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
1117 ; CHECK: # %bb.0: # %entry
1118 ; CHECK-NEXT: vmv1r.v v10, v0
1119 ; CHECK-NEXT: vmv1r.v v0, v9
1120 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1121 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1122 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1125 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1126 <vscale x 8 x i1> %0,
1127 <vscale x 8 x i8> %1,
1129 <vscale x 8 x i1> %3,
1132 ret <vscale x 8 x i1> %a
1135 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1140 define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1141 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
1142 ; CHECK: # %bb.0: # %entry
1143 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1144 ; CHECK-NEXT: vmslt.vx v10, v8, a0
1145 ; CHECK-NEXT: vmnot.m v0, v10
1148 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1149 <vscale x 16 x i8> %0,
1153 ret <vscale x 16 x i1> %a
1156 declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1163 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1164 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
1165 ; CHECK: # %bb.0: # %entry
1166 ; CHECK-NEXT: vmv1r.v v11, v0
1167 ; CHECK-NEXT: vmv1r.v v0, v10
1168 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1169 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1170 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1173 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1174 <vscale x 16 x i1> %0,
1175 <vscale x 16 x i8> %1,
1177 <vscale x 16 x i1> %3,
1180 ret <vscale x 16 x i1> %a
1183 declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1188 define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1189 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
1190 ; CHECK: # %bb.0: # %entry
1191 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1192 ; CHECK-NEXT: vmslt.vx v12, v8, a0
1193 ; CHECK-NEXT: vmnot.m v0, v12
1196 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1197 <vscale x 32 x i8> %0,
1201 ret <vscale x 32 x i1> %a
1204 declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1211 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1212 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
1213 ; CHECK: # %bb.0: # %entry
1214 ; CHECK-NEXT: vmv1r.v v13, v0
1215 ; CHECK-NEXT: vmv1r.v v0, v12
1216 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1217 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1218 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1221 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1222 <vscale x 32 x i1> %0,
1223 <vscale x 32 x i8> %1,
1225 <vscale x 32 x i1> %3,
1228 ret <vscale x 32 x i1> %a
1231 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1236 define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1237 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1240 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1241 ; CHECK-NEXT: vmnot.m v0, v8
1244 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1245 <vscale x 1 x i16> %0,
1249 ret <vscale x 1 x i1> %a
1252 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1259 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1260 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
1261 ; CHECK: # %bb.0: # %entry
1262 ; CHECK-NEXT: vmv1r.v v10, v0
1263 ; CHECK-NEXT: vmv1r.v v0, v9
1264 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1265 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1266 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1269 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1270 <vscale x 1 x i1> %0,
1271 <vscale x 1 x i16> %1,
1273 <vscale x 1 x i1> %3,
1276 ret <vscale x 1 x i1> %a
1279 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1284 define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1285 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
1286 ; CHECK: # %bb.0: # %entry
1287 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1288 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1289 ; CHECK-NEXT: vmnot.m v0, v8
1292 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1293 <vscale x 2 x i16> %0,
1297 ret <vscale x 2 x i1> %a
1300 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1307 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1308 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
1309 ; CHECK: # %bb.0: # %entry
1310 ; CHECK-NEXT: vmv1r.v v10, v0
1311 ; CHECK-NEXT: vmv1r.v v0, v9
1312 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1313 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1314 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1317 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1318 <vscale x 2 x i1> %0,
1319 <vscale x 2 x i16> %1,
1321 <vscale x 2 x i1> %3,
1324 ret <vscale x 2 x i1> %a
1327 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1332 define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1333 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1336 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1337 ; CHECK-NEXT: vmnot.m v0, v8
1340 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1341 <vscale x 4 x i16> %0,
1345 ret <vscale x 4 x i1> %a
1348 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1355 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1356 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
1357 ; CHECK: # %bb.0: # %entry
1358 ; CHECK-NEXT: vmv1r.v v10, v0
1359 ; CHECK-NEXT: vmv1r.v v0, v9
1360 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1361 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1362 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1365 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1366 <vscale x 4 x i1> %0,
1367 <vscale x 4 x i16> %1,
1369 <vscale x 4 x i1> %3,
1372 ret <vscale x 4 x i1> %a
1375 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1380 define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1381 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
1382 ; CHECK: # %bb.0: # %entry
1383 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1384 ; CHECK-NEXT: vmslt.vx v10, v8, a0
1385 ; CHECK-NEXT: vmnot.m v0, v10
1388 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1389 <vscale x 8 x i16> %0,
1393 ret <vscale x 8 x i1> %a
1396 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1403 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1404 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
1405 ; CHECK: # %bb.0: # %entry
1406 ; CHECK-NEXT: vmv1r.v v11, v0
1407 ; CHECK-NEXT: vmv1r.v v0, v10
1408 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1409 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1410 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1413 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1414 <vscale x 8 x i1> %0,
1415 <vscale x 8 x i16> %1,
1417 <vscale x 8 x i1> %3,
1420 ret <vscale x 8 x i1> %a
1423 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1424 <vscale x 16 x i16>,
1428 define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1429 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
1430 ; CHECK: # %bb.0: # %entry
1431 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1432 ; CHECK-NEXT: vmslt.vx v12, v8, a0
1433 ; CHECK-NEXT: vmnot.m v0, v12
1436 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1437 <vscale x 16 x i16> %0,
1441 ret <vscale x 16 x i1> %a
1444 declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1446 <vscale x 16 x i16>,
1451 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1452 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
1453 ; CHECK: # %bb.0: # %entry
1454 ; CHECK-NEXT: vmv1r.v v13, v0
1455 ; CHECK-NEXT: vmv1r.v v0, v12
1456 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1457 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1458 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1461 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1462 <vscale x 16 x i1> %0,
1463 <vscale x 16 x i16> %1,
1465 <vscale x 16 x i1> %3,
1468 ret <vscale x 16 x i1> %a
1471 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1476 define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1477 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
1478 ; CHECK: # %bb.0: # %entry
1479 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1480 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1481 ; CHECK-NEXT: vmnot.m v0, v8
1484 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1485 <vscale x 1 x i32> %0,
1489 ret <vscale x 1 x i1> %a
1492 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1499 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1500 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
1501 ; CHECK: # %bb.0: # %entry
1502 ; CHECK-NEXT: vmv1r.v v10, v0
1503 ; CHECK-NEXT: vmv1r.v v0, v9
1504 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1505 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1506 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1509 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1510 <vscale x 1 x i1> %0,
1511 <vscale x 1 x i32> %1,
1513 <vscale x 1 x i1> %3,
1516 ret <vscale x 1 x i1> %a
1519 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1524 define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1525 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
1526 ; CHECK: # %bb.0: # %entry
1527 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1528 ; CHECK-NEXT: vmslt.vx v8, v8, a0
1529 ; CHECK-NEXT: vmnot.m v0, v8
1532 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1533 <vscale x 2 x i32> %0,
1537 ret <vscale x 2 x i1> %a
1540 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1547 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1548 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
1549 ; CHECK: # %bb.0: # %entry
1550 ; CHECK-NEXT: vmv1r.v v10, v0
1551 ; CHECK-NEXT: vmv1r.v v0, v9
1552 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1553 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
1554 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1557 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1558 <vscale x 2 x i1> %0,
1559 <vscale x 2 x i32> %1,
1561 <vscale x 2 x i1> %3,
1564 ret <vscale x 2 x i1> %a
1567 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1572 define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1573 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
1574 ; CHECK: # %bb.0: # %entry
1575 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1576 ; CHECK-NEXT: vmslt.vx v10, v8, a0
1577 ; CHECK-NEXT: vmnot.m v0, v10
1580 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1581 <vscale x 4 x i32> %0,
1585 ret <vscale x 4 x i1> %a
1588 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
1595 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1596 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
1597 ; CHECK: # %bb.0: # %entry
1598 ; CHECK-NEXT: vmv1r.v v11, v0
1599 ; CHECK-NEXT: vmv1r.v v0, v10
1600 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1601 ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
1602 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1605 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
1606 <vscale x 4 x i1> %0,
1607 <vscale x 4 x i32> %1,
1609 <vscale x 4 x i1> %3,
1612 ret <vscale x 4 x i1> %a
1615 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
1620 define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1621 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
1622 ; CHECK: # %bb.0: # %entry
1623 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1624 ; CHECK-NEXT: vmslt.vx v12, v8, a0
1625 ; CHECK-NEXT: vmnot.m v0, v12
1628 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
1629 <vscale x 8 x i32> %0,
1633 ret <vscale x 8 x i1> %a
1636 declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
1643 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1644 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
1645 ; CHECK: # %bb.0: # %entry
1646 ; CHECK-NEXT: vmv1r.v v13, v0
1647 ; CHECK-NEXT: vmv1r.v v0, v12
1648 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1649 ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
1650 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1653 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
1654 <vscale x 8 x i1> %0,
1655 <vscale x 8 x i32> %1,
1657 <vscale x 8 x i1> %3,
1660 ret <vscale x 8 x i1> %a
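
; For i64 scalar operands on RV32 there is no 64-bit GPR to compare against,
; so the scalar is stored to a stack slot, splatted with a zero-stride
; vlse64.v, and compared with vmsle.vv (operands swapped); RV64 keeps the
; vmslt.vx/vmnot.m sequence used above.
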
1663 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
1668 define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1669 ; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
1670 ; RV32: # %bb.0: # %entry
1671 ; RV32-NEXT: addi sp, sp, -16
1672 ; RV32-NEXT: sw a1, 12(sp)
1673 ; RV32-NEXT: sw a0, 8(sp)
1674 ; RV32-NEXT: addi a0, sp, 8
1675 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1676 ; RV32-NEXT: vlse64.v v9, (a0), zero
1677 ; RV32-NEXT: vmsle.vv v0, v9, v8
1678 ; RV32-NEXT: addi sp, sp, 16
1681 ; RV64-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
1682 ; RV64: # %bb.0: # %entry
1683 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1684 ; RV64-NEXT: vmslt.vx v8, v8, a0
1685 ; RV64-NEXT: vmnot.m v0, v8
1688 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
1689 <vscale x 1 x i64> %0,
1693 ret <vscale x 1 x i1> %a
1696 declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
1703 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1704 ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
1705 ; RV32: # %bb.0: # %entry
1706 ; RV32-NEXT: addi sp, sp, -16
1707 ; RV32-NEXT: sw a1, 12(sp)
1708 ; RV32-NEXT: sw a0, 8(sp)
1709 ; RV32-NEXT: addi a0, sp, 8
1710 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1711 ; RV32-NEXT: vlse64.v v11, (a0), zero
1712 ; RV32-NEXT: vmv1r.v v10, v0
1713 ; RV32-NEXT: vmv1r.v v0, v9
1714 ; RV32-NEXT: vmsle.vv v10, v11, v8, v0.t
1715 ; RV32-NEXT: vmv.v.v v0, v10
1716 ; RV32-NEXT: addi sp, sp, 16
1719 ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
1720 ; RV64: # %bb.0: # %entry
1721 ; RV64-NEXT: vmv1r.v v10, v0
1722 ; RV64-NEXT: vmv1r.v v0, v9
1723 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1724 ; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
1725 ; RV64-NEXT: vmxor.mm v0, v10, v9
1728 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
1729 <vscale x 1 x i1> %0,
1730 <vscale x 1 x i64> %1,
1732 <vscale x 1 x i1> %3,
1735 ret <vscale x 1 x i1> %a
1738 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
1743 define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1744 ; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
1745 ; RV32: # %bb.0: # %entry
1746 ; RV32-NEXT: addi sp, sp, -16
1747 ; RV32-NEXT: sw a1, 12(sp)
1748 ; RV32-NEXT: sw a0, 8(sp)
1749 ; RV32-NEXT: addi a0, sp, 8
1750 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1751 ; RV32-NEXT: vlse64.v v10, (a0), zero
1752 ; RV32-NEXT: vmsle.vv v0, v10, v8
1753 ; RV32-NEXT: addi sp, sp, 16
1756 ; RV64-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
1757 ; RV64: # %bb.0: # %entry
1758 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1759 ; RV64-NEXT: vmslt.vx v10, v8, a0
1760 ; RV64-NEXT: vmnot.m v0, v10
1763 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
1764 <vscale x 2 x i64> %0,
1768 ret <vscale x 2 x i1> %a
1771 declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
1778 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1779 ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
1780 ; RV32: # %bb.0: # %entry
1781 ; RV32-NEXT: addi sp, sp, -16
1782 ; RV32-NEXT: sw a1, 12(sp)
1783 ; RV32-NEXT: sw a0, 8(sp)
1784 ; RV32-NEXT: addi a0, sp, 8
1785 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1786 ; RV32-NEXT: vlse64.v v12, (a0), zero
1787 ; RV32-NEXT: vmv1r.v v11, v0
1788 ; RV32-NEXT: vmv1r.v v0, v10
1789 ; RV32-NEXT: vmsle.vv v11, v12, v8, v0.t
1790 ; RV32-NEXT: vmv1r.v v0, v11
1791 ; RV32-NEXT: addi sp, sp, 16
1794 ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
1795 ; RV64: # %bb.0: # %entry
1796 ; RV64-NEXT: vmv1r.v v11, v0
1797 ; RV64-NEXT: vmv1r.v v0, v10
1798 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1799 ; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
1800 ; RV64-NEXT: vmxor.mm v0, v11, v10
1803 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
1804 <vscale x 2 x i1> %0,
1805 <vscale x 2 x i64> %1,
1807 <vscale x 2 x i1> %3,
1810 ret <vscale x 2 x i1> %a
1813 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
1818 define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1819 ; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
1820 ; RV32: # %bb.0: # %entry
1821 ; RV32-NEXT: addi sp, sp, -16
1822 ; RV32-NEXT: sw a1, 12(sp)
1823 ; RV32-NEXT: sw a0, 8(sp)
1824 ; RV32-NEXT: addi a0, sp, 8
1825 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1826 ; RV32-NEXT: vlse64.v v12, (a0), zero
1827 ; RV32-NEXT: vmsle.vv v0, v12, v8
1828 ; RV32-NEXT: addi sp, sp, 16
1831 ; RV64-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
1832 ; RV64: # %bb.0: # %entry
1833 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1834 ; RV64-NEXT: vmslt.vx v12, v8, a0
1835 ; RV64-NEXT: vmnot.m v0, v12
1838 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
1839 <vscale x 4 x i64> %0,
1843 ret <vscale x 4 x i1> %a
1846 declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
1853 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1854 ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
1855 ; RV32: # %bb.0: # %entry
1856 ; RV32-NEXT: addi sp, sp, -16
1857 ; RV32-NEXT: sw a1, 12(sp)
1858 ; RV32-NEXT: sw a0, 8(sp)
1859 ; RV32-NEXT: addi a0, sp, 8
1860 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
1861 ; RV32-NEXT: vlse64.v v16, (a0), zero
1862 ; RV32-NEXT: vmv1r.v v13, v0
1863 ; RV32-NEXT: vmv1r.v v0, v12
1864 ; RV32-NEXT: vmsle.vv v13, v16, v8, v0.t
1865 ; RV32-NEXT: vmv1r.v v0, v13
1866 ; RV32-NEXT: addi sp, sp, 16
1869 ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
1870 ; RV64: # %bb.0: # %entry
1871 ; RV64-NEXT: vmv1r.v v13, v0
1872 ; RV64-NEXT: vmv1r.v v0, v12
1873 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
1874 ; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
1875 ; RV64-NEXT: vmxor.mm v0, v13, v12
1878 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
1879 <vscale x 4 x i1> %0,
1880 <vscale x 4 x i64> %1,
1882 <vscale x 4 x i1> %3,
1885 ret <vscale x 4 x i1> %a
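
; The immediate (vi) form is lowered to vmsgt.vi with the immediate
; decremented by one, since x >= imm is equivalent to x > imm - 1 for these
; signed compares; for example, the -16 in the first check below corresponds
; to a source immediate of -15.
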
1888 define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
1889 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
1890 ; CHECK: # %bb.0: # %entry
1891 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1892 ; CHECK-NEXT: vmsgt.vi v0, v8, -16
1895 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
1896 <vscale x 1 x i8> %0,
1900 ret <vscale x 1 x i1> %a
1903 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1904 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
1905 ; CHECK: # %bb.0: # %entry
1906 ; CHECK-NEXT: vmv1r.v v10, v0
1907 ; CHECK-NEXT: vmv1r.v v0, v9
1908 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
1909 ; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t
1910 ; CHECK-NEXT: vmv1r.v v0, v10
1913 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
1914 <vscale x 1 x i1> %0,
1915 <vscale x 1 x i8> %1,
1917 <vscale x 1 x i1> %2,
1920 ret <vscale x 1 x i1> %a
1923 define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
1924 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
1925 ; CHECK: # %bb.0: # %entry
1926 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1927 ; CHECK-NEXT: vmsgt.vi v0, v8, -14
1930 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
1931 <vscale x 2 x i8> %0,
1935 ret <vscale x 2 x i1> %a
1938 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1939 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
1940 ; CHECK: # %bb.0: # %entry
1941 ; CHECK-NEXT: vmv1r.v v10, v0
1942 ; CHECK-NEXT: vmv1r.v v0, v9
1943 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
1944 ; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t
1945 ; CHECK-NEXT: vmv1r.v v0, v10
1948 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1949 <vscale x 2 x i1> %0,
1950 <vscale x 2 x i8> %1,
1952 <vscale x 2 x i1> %2,
1955 ret <vscale x 2 x i1> %a
1958 define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
1959 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
1960 ; CHECK: # %bb.0: # %entry
1961 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1962 ; CHECK-NEXT: vmsgt.vi v0, v8, -12
1965 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1966 <vscale x 4 x i8> %0,
1970 ret <vscale x 4 x i1> %a
1973 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1974 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
1975 ; CHECK: # %bb.0: # %entry
1976 ; CHECK-NEXT: vmv1r.v v10, v0
1977 ; CHECK-NEXT: vmv1r.v v0, v9
1978 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
1979 ; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t
1980 ; CHECK-NEXT: vmv1r.v v0, v10
1983 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1984 <vscale x 4 x i1> %0,
1985 <vscale x 4 x i8> %1,
1987 <vscale x 4 x i1> %2,
1990 ret <vscale x 4 x i1> %a
1993 define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
1994 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
1995 ; CHECK: # %bb.0: # %entry
1996 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1997 ; CHECK-NEXT: vmsgt.vi v0, v8, -10
2000 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
2001 <vscale x 8 x i8> %0,
2005 ret <vscale x 8 x i1> %a
2008 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2009 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
2010 ; CHECK: # %bb.0: # %entry
2011 ; CHECK-NEXT: vmv1r.v v10, v0
2012 ; CHECK-NEXT: vmv1r.v v0, v9
2013 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
2014 ; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t
2015 ; CHECK-NEXT: vmv.v.v v0, v10
2018 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
2019 <vscale x 8 x i1> %0,
2020 <vscale x 8 x i8> %1,
2022 <vscale x 8 x i1> %2,
2025 ret <vscale x 8 x i1> %a
2028 define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2029 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
2030 ; CHECK: # %bb.0: # %entry
2031 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2032 ; CHECK-NEXT: vmsgt.vi v0, v8, -8
2035 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
2036 <vscale x 16 x i8> %0,
2040 ret <vscale x 16 x i1> %a
2043 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2044 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
2045 ; CHECK: # %bb.0: # %entry
2046 ; CHECK-NEXT: vmv1r.v v11, v0
2047 ; CHECK-NEXT: vmv1r.v v0, v10
2048 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2049 ; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t
2050 ; CHECK-NEXT: vmv1r.v v0, v11
2053 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
2054 <vscale x 16 x i1> %0,
2055 <vscale x 16 x i8> %1,
2057 <vscale x 16 x i1> %2,
2060 ret <vscale x 16 x i1> %a
2063 define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2064 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
2065 ; CHECK: # %bb.0: # %entry
2066 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2067 ; CHECK-NEXT: vmsgt.vi v0, v8, -6
2070 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
2071 <vscale x 32 x i8> %0,
2075 ret <vscale x 32 x i1> %a
2078 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2079 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
2080 ; CHECK: # %bb.0: # %entry
2081 ; CHECK-NEXT: vmv1r.v v13, v0
2082 ; CHECK-NEXT: vmv1r.v v0, v12
2083 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2084 ; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t
2085 ; CHECK-NEXT: vmv1r.v v0, v13
2088 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
2089 <vscale x 32 x i1> %0,
2090 <vscale x 32 x i8> %1,
2092 <vscale x 32 x i1> %2,
2095 ret <vscale x 32 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, -4
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -3,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 -2,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, -2
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 -1,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 0,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 0,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 2,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 3,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 4,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 5,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 5
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 6,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 7,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 7
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 8,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 10,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 11,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 12,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 13,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

; Test cases where the mask and maskedoff are the same value.
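; Because the maskedoff operand equals the mask, an inactive lane must end up
; equal to its own mask bit, so the compare can be emitted unmasked as the
; inverted condition (vmslt) and folded back in with vmandn.mm, i.e.
; mask & ~(lhs < rhs), avoiding the extra mask-register moves seen above.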
define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %0,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %0,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %0,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %0,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %0,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %0,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %0,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %0,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %0,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

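; For the i64 element types below, RV32 has no single GPR wide enough for the
; scalar operand, so the generated code stores the two halves to the stack,
; splats them with vlse64.v, and uses a vector-vector compare (vmsle.vv);
; RV64 keeps the vmslt.vx/vmandn.mm form.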
define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmslt.vx v8, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v8
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %0,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: vmsle.vv v10, v12, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmslt.vx v10, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v10
; RV64-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmslt.vx v12, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v12
; RV64-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}