1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
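; This file exercises the llvm.riscv.vmsgeu ("unsigned greater than or equal")
; intrinsics in their vector-vector (vv), vector-scalar (vx) and
; vector-immediate (vi) forms, unmasked and masked, across element widths from
; e8 to e64 and the matching LMULs. The sed commands above rewrite the iXLen
; placeholder to i32 or i64, so the same IR drives both the RV32 and RV64 runs.
; RVV has no vmsgeu encoding, so the CHECK lines expect the compares to be
; materialized from vmsleu, vmsltu or vmsgtu instead.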
7 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
12 define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
13 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
14 ; CHECK: # %bb.0: # %entry
15 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
16 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
19 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
24 ret <vscale x 1 x i1> %a
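; Sketch of the unmasked vv lowering these checks assume: with no vmsgeu.vv
; instruction available, the compare is emitted as vmsleu.vv with the operands
; swapped (a >= b unsigned is the same as b <= a), e.g. vmsgeu(%0, %1) above
; appears as "vmsleu.vv v0, v9, v8".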
27 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
34 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
35 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
36 ; CHECK: # %bb.0: # %entry
37 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
38 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
39 ; CHECK-NEXT: vmv1r.v v11, v0
40 ; CHECK-NEXT: vmv1r.v v0, v8
41 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
42 ; CHECK-NEXT: vmv1r.v v0, v11
45 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
49 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
53 <vscale x 1 x i1> %mask,
56 ret <vscale x 1 x i1> %a
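; In the masked vv tests the IR computes %mask with an unmasked vmsgeu and then
; passes it, together with %0 as the merge operand, to the masked intrinsic.
; The surrounding vmv1r.v copies are mask-register plumbing rather than part of
; the compare: the mask has to sit in v0, while the masked compare's result is
; tied to the merge value that also arrived in v0, so the merge is parked in a
; scratch register, %mask is moved into v0, and the result is copied back to v0
; for the return.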
59 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
64 define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
65 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
66 ; CHECK: # %bb.0: # %entry
67 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
68 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
71 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
76 ret <vscale x 2 x i1> %a
79 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
86 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
87 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
88 ; CHECK: # %bb.0: # %entry
89 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
90 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
91 ; CHECK-NEXT: vmv1r.v v11, v0
92 ; CHECK-NEXT: vmv1r.v v0, v8
93 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
94 ; CHECK-NEXT: vmv1r.v v0, v11
97 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
101 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
102 <vscale x 2 x i1> %0,
103 <vscale x 2 x i8> %2,
104 <vscale x 2 x i8> %3,
105 <vscale x 2 x i1> %mask,
108 ret <vscale x 2 x i1> %a
111 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
116 define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
117 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
118 ; CHECK: # %bb.0: # %entry
119 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
120 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
123 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
124 <vscale x 4 x i8> %0,
125 <vscale x 4 x i8> %1,
128 ret <vscale x 4 x i1> %a
131 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
138 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
139 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
140 ; CHECK: # %bb.0: # %entry
141 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
142 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
143 ; CHECK-NEXT: vmv1r.v v11, v0
144 ; CHECK-NEXT: vmv1r.v v0, v8
145 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
146 ; CHECK-NEXT: vmv1r.v v0, v11
149 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
150 <vscale x 4 x i8> %1,
151 <vscale x 4 x i8> %2,
153 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
154 <vscale x 4 x i1> %0,
155 <vscale x 4 x i8> %2,
156 <vscale x 4 x i8> %3,
157 <vscale x 4 x i1> %mask,
160 ret <vscale x 4 x i1> %a
163 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
168 define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
169 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
170 ; CHECK: # %bb.0: # %entry
171 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
172 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
175 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
176 <vscale x 8 x i8> %0,
177 <vscale x 8 x i8> %1,
180 ret <vscale x 8 x i1> %a
183 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
190 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
191 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
192 ; CHECK: # %bb.0: # %entry
193 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
194 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
195 ; CHECK-NEXT: vmv1r.v v11, v0
196 ; CHECK-NEXT: vmv.v.v v0, v8
197 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
198 ; CHECK-NEXT: vmv.v.v v0, v11
201 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
202 <vscale x 8 x i8> %1,
203 <vscale x 8 x i8> %2,
205 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
206 <vscale x 8 x i1> %0,
207 <vscale x 8 x i8> %2,
208 <vscale x 8 x i8> %3,
209 <vscale x 8 x i1> %mask,
212 ret <vscale x 8 x i1> %a
215 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
220 define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
221 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
224 ; CHECK-NEXT: vmsleu.vv v0, v10, v8
227 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
232 ret <vscale x 16 x i1> %a
235 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
242 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
243 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
244 ; CHECK: # %bb.0: # %entry
245 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
246 ; CHECK-NEXT: vmsleu.vv v14, v10, v8
247 ; CHECK-NEXT: vmv1r.v v8, v0
248 ; CHECK-NEXT: vmv1r.v v0, v14
249 ; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
250 ; CHECK-NEXT: vmv1r.v v0, v8
253 %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
254 <vscale x 16 x i8> %1,
255 <vscale x 16 x i8> %2,
257 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
258 <vscale x 16 x i1> %0,
259 <vscale x 16 x i8> %2,
260 <vscale x 16 x i8> %3,
261 <vscale x 16 x i1> %mask,
264 ret <vscale x 16 x i1> %a
267 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
272 define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
273 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
276 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
279 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
280 <vscale x 32 x i8> %0,
281 <vscale x 32 x i8> %1,
284 ret <vscale x 32 x i1> %a
287 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
294 define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
295 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
298 ; CHECK-NEXT: vmsleu.vv v20, v12, v8
299 ; CHECK-NEXT: vmv1r.v v8, v0
300 ; CHECK-NEXT: vmv1r.v v0, v20
301 ; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
302 ; CHECK-NEXT: vmv1r.v v0, v8
305 %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
306 <vscale x 32 x i8> %1,
307 <vscale x 32 x i8> %2,
309 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
310 <vscale x 32 x i1> %0,
311 <vscale x 32 x i8> %2,
312 <vscale x 32 x i8> %3,
313 <vscale x 32 x i1> %mask,
316 ret <vscale x 32 x i1> %a
319 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
324 define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
325 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
326 ; CHECK: # %bb.0: # %entry
327 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
328 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
331 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
332 <vscale x 1 x i16> %0,
333 <vscale x 1 x i16> %1,
336 ret <vscale x 1 x i1> %a
339 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
346 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
347 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
348 ; CHECK: # %bb.0: # %entry
349 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
350 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
351 ; CHECK-NEXT: vmv1r.v v11, v0
352 ; CHECK-NEXT: vmv1r.v v0, v8
353 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
354 ; CHECK-NEXT: vmv1r.v v0, v11
357 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
358 <vscale x 1 x i16> %1,
359 <vscale x 1 x i16> %2,
361 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
362 <vscale x 1 x i1> %0,
363 <vscale x 1 x i16> %2,
364 <vscale x 1 x i16> %3,
365 <vscale x 1 x i1> %mask,
368 ret <vscale x 1 x i1> %a
371 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
376 define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
377 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
378 ; CHECK: # %bb.0: # %entry
379 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
380 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
383 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
384 <vscale x 2 x i16> %0,
385 <vscale x 2 x i16> %1,
388 ret <vscale x 2 x i1> %a
391 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
398 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
399 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
400 ; CHECK: # %bb.0: # %entry
401 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
402 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
403 ; CHECK-NEXT: vmv1r.v v11, v0
404 ; CHECK-NEXT: vmv1r.v v0, v8
405 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
406 ; CHECK-NEXT: vmv1r.v v0, v11
409 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
410 <vscale x 2 x i16> %1,
411 <vscale x 2 x i16> %2,
413 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
414 <vscale x 2 x i1> %0,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i16> %3,
417 <vscale x 2 x i1> %mask,
420 ret <vscale x 2 x i1> %a
423 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
428 define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
435 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
436 <vscale x 4 x i16> %0,
437 <vscale x 4 x i16> %1,
440 ret <vscale x 4 x i1> %a
443 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
450 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
451 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
452 ; CHECK: # %bb.0: # %entry
453 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
454 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
455 ; CHECK-NEXT: vmv1r.v v11, v0
456 ; CHECK-NEXT: vmv.v.v v0, v8
457 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
458 ; CHECK-NEXT: vmv.v.v v0, v11
461 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
462 <vscale x 4 x i16> %1,
463 <vscale x 4 x i16> %2,
465 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
466 <vscale x 4 x i1> %0,
467 <vscale x 4 x i16> %2,
468 <vscale x 4 x i16> %3,
469 <vscale x 4 x i1> %mask,
472 ret <vscale x 4 x i1> %a
475 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
480 define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
481 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
482 ; CHECK: # %bb.0: # %entry
483 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
484 ; CHECK-NEXT: vmsleu.vv v0, v10, v8
487 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
488 <vscale x 8 x i16> %0,
489 <vscale x 8 x i16> %1,
492 ret <vscale x 8 x i1> %a
495 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
502 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
503 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
504 ; CHECK: # %bb.0: # %entry
505 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
506 ; CHECK-NEXT: vmsleu.vv v14, v10, v8
507 ; CHECK-NEXT: vmv1r.v v8, v0
508 ; CHECK-NEXT: vmv1r.v v0, v14
509 ; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
510 ; CHECK-NEXT: vmv1r.v v0, v8
513 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
514 <vscale x 8 x i16> %1,
515 <vscale x 8 x i16> %2,
517 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
518 <vscale x 8 x i1> %0,
519 <vscale x 8 x i16> %2,
520 <vscale x 8 x i16> %3,
521 <vscale x 8 x i1> %mask,
524 ret <vscale x 8 x i1> %a
527 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
532 define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
533 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
534 ; CHECK: # %bb.0: # %entry
535 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
536 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
539 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
540 <vscale x 16 x i16> %0,
541 <vscale x 16 x i16> %1,
544 ret <vscale x 16 x i1> %a
547 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
554 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
555 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
556 ; CHECK: # %bb.0: # %entry
557 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
558 ; CHECK-NEXT: vmsleu.vv v20, v12, v8
559 ; CHECK-NEXT: vmv1r.v v8, v0
560 ; CHECK-NEXT: vmv1r.v v0, v20
561 ; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
562 ; CHECK-NEXT: vmv1r.v v0, v8
565 %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
566 <vscale x 16 x i16> %1,
567 <vscale x 16 x i16> %2,
569 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
570 <vscale x 16 x i1> %0,
571 <vscale x 16 x i16> %2,
572 <vscale x 16 x i16> %3,
573 <vscale x 16 x i1> %mask,
576 ret <vscale x 16 x i1> %a
579 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
584 define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
585 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
586 ; CHECK: # %bb.0: # %entry
587 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
588 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
591 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
592 <vscale x 1 x i32> %0,
593 <vscale x 1 x i32> %1,
596 ret <vscale x 1 x i1> %a
599 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
606 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
607 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
608 ; CHECK: # %bb.0: # %entry
609 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
610 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
611 ; CHECK-NEXT: vmv1r.v v11, v0
612 ; CHECK-NEXT: vmv1r.v v0, v8
613 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
614 ; CHECK-NEXT: vmv1r.v v0, v11
617 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
618 <vscale x 1 x i32> %1,
619 <vscale x 1 x i32> %2,
621 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
622 <vscale x 1 x i1> %0,
623 <vscale x 1 x i32> %2,
624 <vscale x 1 x i32> %3,
625 <vscale x 1 x i1> %mask,
628 ret <vscale x 1 x i1> %a
631 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
636 define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
637 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
640 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
643 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
644 <vscale x 2 x i32> %0,
645 <vscale x 2 x i32> %1,
648 ret <vscale x 2 x i1> %a
651 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
658 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
659 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
660 ; CHECK: # %bb.0: # %entry
661 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
662 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
663 ; CHECK-NEXT: vmv1r.v v11, v0
664 ; CHECK-NEXT: vmv.v.v v0, v8
665 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
666 ; CHECK-NEXT: vmv.v.v v0, v11
669 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
670 <vscale x 2 x i32> %1,
671 <vscale x 2 x i32> %2,
673 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
674 <vscale x 2 x i1> %0,
675 <vscale x 2 x i32> %2,
676 <vscale x 2 x i32> %3,
677 <vscale x 2 x i1> %mask,
680 ret <vscale x 2 x i1> %a
683 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
688 define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
689 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
690 ; CHECK: # %bb.0: # %entry
691 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
692 ; CHECK-NEXT: vmsleu.vv v0, v10, v8
695 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
696 <vscale x 4 x i32> %0,
697 <vscale x 4 x i32> %1,
700 ret <vscale x 4 x i1> %a
703 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
710 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
711 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
712 ; CHECK: # %bb.0: # %entry
713 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
714 ; CHECK-NEXT: vmsleu.vv v14, v10, v8
715 ; CHECK-NEXT: vmv1r.v v8, v0
716 ; CHECK-NEXT: vmv1r.v v0, v14
717 ; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
718 ; CHECK-NEXT: vmv1r.v v0, v8
721 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
722 <vscale x 4 x i32> %1,
723 <vscale x 4 x i32> %2,
725 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
726 <vscale x 4 x i1> %0,
727 <vscale x 4 x i32> %2,
728 <vscale x 4 x i32> %3,
729 <vscale x 4 x i1> %mask,
732 ret <vscale x 4 x i1> %a
735 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
740 define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
741 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
742 ; CHECK: # %bb.0: # %entry
743 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
744 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
747 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
748 <vscale x 8 x i32> %0,
749 <vscale x 8 x i32> %1,
752 ret <vscale x 8 x i1> %a
755 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
762 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
763 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
764 ; CHECK: # %bb.0: # %entry
765 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
766 ; CHECK-NEXT: vmsleu.vv v20, v12, v8
767 ; CHECK-NEXT: vmv1r.v v8, v0
768 ; CHECK-NEXT: vmv1r.v v0, v20
769 ; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
770 ; CHECK-NEXT: vmv1r.v v0, v8
773 %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
774 <vscale x 8 x i32> %1,
775 <vscale x 8 x i32> %2,
777 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
778 <vscale x 8 x i1> %0,
779 <vscale x 8 x i32> %2,
780 <vscale x 8 x i32> %3,
781 <vscale x 8 x i1> %mask,
784 ret <vscale x 8 x i1> %a
787 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
792 define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
793 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
794 ; CHECK: # %bb.0: # %entry
795 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
796 ; CHECK-NEXT: vmsleu.vv v0, v9, v8
799 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
800 <vscale x 1 x i64> %0,
801 <vscale x 1 x i64> %1,
804 ret <vscale x 1 x i1> %a
807 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
814 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
815 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
816 ; CHECK: # %bb.0: # %entry
817 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
818 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
819 ; CHECK-NEXT: vmv1r.v v11, v0
820 ; CHECK-NEXT: vmv.v.v v0, v8
821 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
822 ; CHECK-NEXT: vmv.v.v v0, v11
825 %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
826 <vscale x 1 x i64> %1,
827 <vscale x 1 x i64> %2,
829 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
830 <vscale x 1 x i1> %0,
831 <vscale x 1 x i64> %2,
832 <vscale x 1 x i64> %3,
833 <vscale x 1 x i1> %mask,
836 ret <vscale x 1 x i1> %a
839 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
844 define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
848 ; CHECK-NEXT: vmsleu.vv v0, v10, v8
851 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
852 <vscale x 2 x i64> %0,
853 <vscale x 2 x i64> %1,
856 ret <vscale x 2 x i1> %a
859 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
866 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
867 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
868 ; CHECK: # %bb.0: # %entry
869 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
870 ; CHECK-NEXT: vmsleu.vv v14, v10, v8
871 ; CHECK-NEXT: vmv1r.v v8, v0
872 ; CHECK-NEXT: vmv1r.v v0, v14
873 ; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
874 ; CHECK-NEXT: vmv1r.v v0, v8
877 %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
878 <vscale x 2 x i64> %1,
879 <vscale x 2 x i64> %2,
881 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
882 <vscale x 2 x i1> %0,
883 <vscale x 2 x i64> %2,
884 <vscale x 2 x i64> %3,
885 <vscale x 2 x i1> %mask,
888 ret <vscale x 2 x i1> %a
891 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
896 define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
897 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
898 ; CHECK: # %bb.0: # %entry
899 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
900 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
903 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
904 <vscale x 4 x i64> %0,
905 <vscale x 4 x i64> %1,
908 ret <vscale x 4 x i1> %a
911 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
918 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
919 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
920 ; CHECK: # %bb.0: # %entry
921 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
922 ; CHECK-NEXT: vmsleu.vv v20, v12, v8
923 ; CHECK-NEXT: vmv1r.v v8, v0
924 ; CHECK-NEXT: vmv1r.v v0, v20
925 ; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
926 ; CHECK-NEXT: vmv1r.v v0, v8
929 %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
930 <vscale x 4 x i64> %1,
931 <vscale x 4 x i64> %2,
933 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
934 <vscale x 4 x i1> %0,
935 <vscale x 4 x i64> %2,
936 <vscale x 4 x i64> %3,
937 <vscale x 4 x i1> %mask,
940 ret <vscale x 4 x i1> %a
943 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
948 define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
949 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
952 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
953 ; CHECK-NEXT: vmnot.m v0, v8
956 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
957 <vscale x 1 x i8> %0,
961 ret <vscale x 1 x i1> %a
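; Sketch of the scalar (vx) lowering reflected in the checks: with no
; vmsgeu.vx instruction, the unmasked form becomes vmsltu.vx followed by
; vmnot.m (x >= s is !(x < s)). The masked form instead does a masked
; vmsltu.vx into a copy of the merge operand and then flips exactly the active
; lanes with "vmxor.mm v0, result, mask", leaving inactive lanes holding the
; merge value.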
964 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
971 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
972 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
973 ; CHECK: # %bb.0: # %entry
974 ; CHECK-NEXT: vmv1r.v v10, v0
975 ; CHECK-NEXT: vmv1r.v v0, v9
976 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
977 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
978 ; CHECK-NEXT: vmxor.mm v0, v10, v9
981 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
982 <vscale x 1 x i1> %0,
983 <vscale x 1 x i8> %1,
985 <vscale x 1 x i1> %3,
988 ret <vscale x 1 x i1> %a
991 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
996 define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
997 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
998 ; CHECK: # %bb.0: # %entry
999 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1000 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1001 ; CHECK-NEXT: vmnot.m v0, v8
1004 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
1005 <vscale x 2 x i8> %0,
1009 ret <vscale x 2 x i1> %a
1012 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
1019 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1020 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
1021 ; CHECK: # %bb.0: # %entry
1022 ; CHECK-NEXT: vmv1r.v v10, v0
1023 ; CHECK-NEXT: vmv1r.v v0, v9
1024 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1025 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1026 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1029 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
1030 <vscale x 2 x i1> %0,
1031 <vscale x 2 x i8> %1,
1033 <vscale x 2 x i1> %3,
1036 ret <vscale x 2 x i1> %a
1039 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
1044 define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1045 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
1046 ; CHECK: # %bb.0: # %entry
1047 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1048 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1049 ; CHECK-NEXT: vmnot.m v0, v8
1052 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
1053 <vscale x 4 x i8> %0,
1057 ret <vscale x 4 x i1> %a
1060 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
1067 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1068 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
1069 ; CHECK: # %bb.0: # %entry
1070 ; CHECK-NEXT: vmv1r.v v10, v0
1071 ; CHECK-NEXT: vmv1r.v v0, v9
1072 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1073 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1074 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1077 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
1078 <vscale x 4 x i1> %0,
1079 <vscale x 4 x i8> %1,
1081 <vscale x 4 x i1> %3,
1084 ret <vscale x 4 x i1> %a
1087 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
1092 define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1093 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
1094 ; CHECK: # %bb.0: # %entry
1095 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1096 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1097 ; CHECK-NEXT: vmnot.m v0, v8
1100 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
1101 <vscale x 8 x i8> %0,
1105 ret <vscale x 8 x i1> %a
1108 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
1115 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1116 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
1117 ; CHECK: # %bb.0: # %entry
1118 ; CHECK-NEXT: vmv1r.v v10, v0
1119 ; CHECK-NEXT: vmv1r.v v0, v9
1120 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1121 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1122 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1125 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
1126 <vscale x 8 x i1> %0,
1127 <vscale x 8 x i8> %1,
1129 <vscale x 8 x i1> %3,
1132 ret <vscale x 8 x i1> %a
1135 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
1140 define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1141 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
1142 ; CHECK: # %bb.0: # %entry
1143 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1144 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
1145 ; CHECK-NEXT: vmnot.m v0, v10
1148 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
1149 <vscale x 16 x i8> %0,
1153 ret <vscale x 16 x i1> %a
1156 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
1163 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1164 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
1165 ; CHECK: # %bb.0: # %entry
1166 ; CHECK-NEXT: vmv1r.v v11, v0
1167 ; CHECK-NEXT: vmv1r.v v0, v10
1168 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1169 ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
1170 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1173 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
1174 <vscale x 16 x i1> %0,
1175 <vscale x 16 x i8> %1,
1177 <vscale x 16 x i1> %3,
1180 ret <vscale x 16 x i1> %a
1183 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
1188 define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1189 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
1190 ; CHECK: # %bb.0: # %entry
1191 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1192 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
1193 ; CHECK-NEXT: vmnot.m v0, v12
1196 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
1197 <vscale x 32 x i8> %0,
1201 ret <vscale x 32 x i1> %a
1204 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
1211 define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1212 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
1213 ; CHECK: # %bb.0: # %entry
1214 ; CHECK-NEXT: vmv1r.v v13, v0
1215 ; CHECK-NEXT: vmv1r.v v0, v12
1216 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1217 ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
1218 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1221 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
1222 <vscale x 32 x i1> %0,
1223 <vscale x 32 x i8> %1,
1225 <vscale x 32 x i1> %3,
1228 ret <vscale x 32 x i1> %a
1231 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
1236 define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1237 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1240 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1241 ; CHECK-NEXT: vmnot.m v0, v8
1244 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
1245 <vscale x 1 x i16> %0,
1249 ret <vscale x 1 x i1> %a
1252 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
1259 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1260 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
1261 ; CHECK: # %bb.0: # %entry
1262 ; CHECK-NEXT: vmv1r.v v10, v0
1263 ; CHECK-NEXT: vmv1r.v v0, v9
1264 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1265 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1266 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1269 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
1270 <vscale x 1 x i1> %0,
1271 <vscale x 1 x i16> %1,
1273 <vscale x 1 x i1> %3,
1276 ret <vscale x 1 x i1> %a
1279 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
1284 define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1285 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
1286 ; CHECK: # %bb.0: # %entry
1287 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1288 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1289 ; CHECK-NEXT: vmnot.m v0, v8
1292 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
1293 <vscale x 2 x i16> %0,
1297 ret <vscale x 2 x i1> %a
1300 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
1307 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1308 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
1309 ; CHECK: # %bb.0: # %entry
1310 ; CHECK-NEXT: vmv1r.v v10, v0
1311 ; CHECK-NEXT: vmv1r.v v0, v9
1312 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1313 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1314 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1317 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
1318 <vscale x 2 x i1> %0,
1319 <vscale x 2 x i16> %1,
1321 <vscale x 2 x i1> %3,
1324 ret <vscale x 2 x i1> %a
1327 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
1332 define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1333 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1336 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1337 ; CHECK-NEXT: vmnot.m v0, v8
1340 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
1341 <vscale x 4 x i16> %0,
1345 ret <vscale x 4 x i1> %a
1348 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
1355 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1356 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
1357 ; CHECK: # %bb.0: # %entry
1358 ; CHECK-NEXT: vmv1r.v v10, v0
1359 ; CHECK-NEXT: vmv1r.v v0, v9
1360 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1361 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1362 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1365 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
1366 <vscale x 4 x i1> %0,
1367 <vscale x 4 x i16> %1,
1369 <vscale x 4 x i1> %3,
1372 ret <vscale x 4 x i1> %a
1375 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
1380 define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1381 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
1382 ; CHECK: # %bb.0: # %entry
1383 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1384 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
1385 ; CHECK-NEXT: vmnot.m v0, v10
1388 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
1389 <vscale x 8 x i16> %0,
1393 ret <vscale x 8 x i1> %a
1396 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
1403 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1404 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
1405 ; CHECK: # %bb.0: # %entry
1406 ; CHECK-NEXT: vmv1r.v v11, v0
1407 ; CHECK-NEXT: vmv1r.v v0, v10
1408 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1409 ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
1410 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1413 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
1414 <vscale x 8 x i1> %0,
1415 <vscale x 8 x i16> %1,
1417 <vscale x 8 x i1> %3,
1420 ret <vscale x 8 x i1> %a
1423 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
1424 <vscale x 16 x i16>,
1428 define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1429 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
1430 ; CHECK: # %bb.0: # %entry
1431 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1432 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
1433 ; CHECK-NEXT: vmnot.m v0, v12
1436 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
1437 <vscale x 16 x i16> %0,
1441 ret <vscale x 16 x i1> %a
1444 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
1446 <vscale x 16 x i16>,
1451 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1452 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
1453 ; CHECK: # %bb.0: # %entry
1454 ; CHECK-NEXT: vmv1r.v v13, v0
1455 ; CHECK-NEXT: vmv1r.v v0, v12
1456 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1457 ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
1458 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1461 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
1462 <vscale x 16 x i1> %0,
1463 <vscale x 16 x i16> %1,
1465 <vscale x 16 x i1> %3,
1468 ret <vscale x 16 x i1> %a
1471 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
1476 define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1477 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
1478 ; CHECK: # %bb.0: # %entry
1479 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1480 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1481 ; CHECK-NEXT: vmnot.m v0, v8
1484 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
1485 <vscale x 1 x i32> %0,
1489 ret <vscale x 1 x i1> %a
1492 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
1499 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1500 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
1501 ; CHECK: # %bb.0: # %entry
1502 ; CHECK-NEXT: vmv1r.v v10, v0
1503 ; CHECK-NEXT: vmv1r.v v0, v9
1504 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1505 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1506 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1509 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
1510 <vscale x 1 x i1> %0,
1511 <vscale x 1 x i32> %1,
1513 <vscale x 1 x i1> %3,
1516 ret <vscale x 1 x i1> %a
1519 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
1524 define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1525 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
1526 ; CHECK: # %bb.0: # %entry
1527 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1528 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
1529 ; CHECK-NEXT: vmnot.m v0, v8
1532 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
1533 <vscale x 2 x i32> %0,
1537 ret <vscale x 2 x i1> %a
1540 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
1547 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1548 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
1549 ; CHECK: # %bb.0: # %entry
1550 ; CHECK-NEXT: vmv1r.v v10, v0
1551 ; CHECK-NEXT: vmv1r.v v0, v9
1552 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1553 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
1554 ; CHECK-NEXT: vmxor.mm v0, v10, v9
1557 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
1558 <vscale x 2 x i1> %0,
1559 <vscale x 2 x i32> %1,
1561 <vscale x 2 x i1> %3,
1564 ret <vscale x 2 x i1> %a
1567 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
1572 define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1573 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
1574 ; CHECK: # %bb.0: # %entry
1575 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1576 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
1577 ; CHECK-NEXT: vmnot.m v0, v10
1580 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
1581 <vscale x 4 x i32> %0,
1585 ret <vscale x 4 x i1> %a
1588 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
1595 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1596 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
1597 ; CHECK: # %bb.0: # %entry
1598 ; CHECK-NEXT: vmv1r.v v11, v0
1599 ; CHECK-NEXT: vmv1r.v v0, v10
1600 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1601 ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
1602 ; CHECK-NEXT: vmxor.mm v0, v11, v10
1605 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
1606 <vscale x 4 x i1> %0,
1607 <vscale x 4 x i32> %1,
1609 <vscale x 4 x i1> %3,
1612 ret <vscale x 4 x i1> %a
1615 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
1620 define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1621 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
1622 ; CHECK: # %bb.0: # %entry
1623 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1624 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
1625 ; CHECK-NEXT: vmnot.m v0, v12
1628 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
1629 <vscale x 8 x i32> %0,
1633 ret <vscale x 8 x i1> %a
1636 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
1643 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1644 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
1645 ; CHECK: # %bb.0: # %entry
1646 ; CHECK-NEXT: vmv1r.v v13, v0
1647 ; CHECK-NEXT: vmv1r.v v0, v12
1648 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1649 ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
1650 ; CHECK-NEXT: vmxor.mm v0, v13, v12
1653 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
1654 <vscale x 8 x i1> %0,
1655 <vscale x 8 x i32> %1,
1657 <vscale x 8 x i1> %3,
1660 ret <vscale x 8 x i1> %a
1663 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
1668 define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1669 ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
1670 ; RV32: # %bb.0: # %entry
1671 ; RV32-NEXT: addi sp, sp, -16
1672 ; RV32-NEXT: sw a1, 12(sp)
1673 ; RV32-NEXT: sw a0, 8(sp)
1674 ; RV32-NEXT: addi a0, sp, 8
1675 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1676 ; RV32-NEXT: vlse64.v v9, (a0), zero
1677 ; RV32-NEXT: vmsleu.vv v0, v9, v8
1678 ; RV32-NEXT: addi sp, sp, 16
1681 ; RV64-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
1682 ; RV64: # %bb.0: # %entry
1683 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1684 ; RV64-NEXT: vmsltu.vx v8, v8, a0
1685 ; RV64-NEXT: vmnot.m v0, v8
1688 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
1689 <vscale x 1 x i64> %0,
1693 ret <vscale x 1 x i1> %a
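; The i64 scalar tests diverge between the two targets, hence the RV32/RV64
; check prefixes: RV32 has no 64-bit GPR to feed a .vx compare, so the scalar
; is stored to the stack as two 32-bit halves, broadcast with vlse64.v, and
; compared with vmsleu.vv, while RV64 keeps the vmsltu.vx + vmnot.m (and, for
; the masked tests, vmsltu.vx + vmxor.mm) pattern used above.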
1696 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
1703 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1704 ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
1705 ; RV32: # %bb.0: # %entry
1706 ; RV32-NEXT: addi sp, sp, -16
1707 ; RV32-NEXT: sw a1, 12(sp)
1708 ; RV32-NEXT: sw a0, 8(sp)
1709 ; RV32-NEXT: addi a0, sp, 8
1710 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1711 ; RV32-NEXT: vlse64.v v11, (a0), zero
1712 ; RV32-NEXT: vmv1r.v v10, v0
1713 ; RV32-NEXT: vmv1r.v v0, v9
1714 ; RV32-NEXT: vmsleu.vv v10, v11, v8, v0.t
1715 ; RV32-NEXT: vmv.v.v v0, v10
1716 ; RV32-NEXT: addi sp, sp, 16
1719 ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
1720 ; RV64: # %bb.0: # %entry
1721 ; RV64-NEXT: vmv1r.v v10, v0
1722 ; RV64-NEXT: vmv1r.v v0, v9
1723 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1724 ; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
1725 ; RV64-NEXT: vmxor.mm v0, v10, v9
1728 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
1729 <vscale x 1 x i1> %0,
1730 <vscale x 1 x i64> %1,
1732 <vscale x 1 x i1> %3,
1735 ret <vscale x 1 x i1> %a
1738 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
1743 define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1744 ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
1745 ; RV32: # %bb.0: # %entry
1746 ; RV32-NEXT: addi sp, sp, -16
1747 ; RV32-NEXT: sw a1, 12(sp)
1748 ; RV32-NEXT: sw a0, 8(sp)
1749 ; RV32-NEXT: addi a0, sp, 8
1750 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1751 ; RV32-NEXT: vlse64.v v10, (a0), zero
1752 ; RV32-NEXT: vmsleu.vv v0, v10, v8
1753 ; RV32-NEXT: addi sp, sp, 16
1756 ; RV64-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
1757 ; RV64: # %bb.0: # %entry
1758 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1759 ; RV64-NEXT: vmsltu.vx v10, v8, a0
1760 ; RV64-NEXT: vmnot.m v0, v10
1763 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
1764 <vscale x 2 x i64> %0,
1768 ret <vscale x 2 x i1> %a
1771 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
1778 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1779 ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
1780 ; RV32: # %bb.0: # %entry
1781 ; RV32-NEXT: addi sp, sp, -16
1782 ; RV32-NEXT: sw a1, 12(sp)
1783 ; RV32-NEXT: sw a0, 8(sp)
1784 ; RV32-NEXT: addi a0, sp, 8
1785 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1786 ; RV32-NEXT: vlse64.v v12, (a0), zero
1787 ; RV32-NEXT: vmv1r.v v11, v0
1788 ; RV32-NEXT: vmv1r.v v0, v10
1789 ; RV32-NEXT: vmsleu.vv v11, v12, v8, v0.t
1790 ; RV32-NEXT: vmv1r.v v0, v11
1791 ; RV32-NEXT: addi sp, sp, 16
1794 ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
1795 ; RV64: # %bb.0: # %entry
1796 ; RV64-NEXT: vmv1r.v v11, v0
1797 ; RV64-NEXT: vmv1r.v v0, v10
1798 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1799 ; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
1800 ; RV64-NEXT: vmxor.mm v0, v11, v10
1803 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
1804 <vscale x 2 x i1> %0,
1805 <vscale x 2 x i64> %1,
1807 <vscale x 2 x i1> %3,
1810 ret <vscale x 2 x i1> %a
1813 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
1818 define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1819 ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
1820 ; RV32: # %bb.0: # %entry
1821 ; RV32-NEXT: addi sp, sp, -16
1822 ; RV32-NEXT: sw a1, 12(sp)
1823 ; RV32-NEXT: sw a0, 8(sp)
1824 ; RV32-NEXT: addi a0, sp, 8
1825 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1826 ; RV32-NEXT: vlse64.v v12, (a0), zero
1827 ; RV32-NEXT: vmsleu.vv v0, v12, v8
1828 ; RV32-NEXT: addi sp, sp, 16
1831 ; RV64-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
1832 ; RV64: # %bb.0: # %entry
1833 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1834 ; RV64-NEXT: vmsltu.vx v12, v8, a0
1835 ; RV64-NEXT: vmnot.m v0, v12
1838 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
1839 <vscale x 4 x i64> %0,
1843 ret <vscale x 4 x i1> %a
1846 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
1853 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1854 ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
1855 ; RV32: # %bb.0: # %entry
1856 ; RV32-NEXT: addi sp, sp, -16
1857 ; RV32-NEXT: sw a1, 12(sp)
1858 ; RV32-NEXT: sw a0, 8(sp)
1859 ; RV32-NEXT: addi a0, sp, 8
1860 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
1861 ; RV32-NEXT: vlse64.v v16, (a0), zero
1862 ; RV32-NEXT: vmv1r.v v13, v0
1863 ; RV32-NEXT: vmv1r.v v0, v12
1864 ; RV32-NEXT: vmsleu.vv v13, v16, v8, v0.t
1865 ; RV32-NEXT: vmv1r.v v0, v13
1866 ; RV32-NEXT: addi sp, sp, 16
1869 ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
1870 ; RV64: # %bb.0: # %entry
1871 ; RV64-NEXT: vmv1r.v v13, v0
1872 ; RV64-NEXT: vmv1r.v v0, v12
1873 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
1874 ; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
1875 ; RV64-NEXT: vmxor.mm v0, v13, v12
1878 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
1879 <vscale x 4 x i1> %0,
1880 <vscale x 4 x i64> %1,
1882 <vscale x 4 x i1> %3,
1885 ret <vscale x 4 x i1> %a
1888 define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
1889 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
1890 ; CHECK: # %bb.0: # %entry
1891 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1892 ; CHECK-NEXT: vmsgtu.vi v0, v8, -16
1895 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
1896 <vscale x 1 x i8> %0,
1900 ret <vscale x 1 x i1> %a
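; Sketch of the immediate (vi) form: with no vmsgeu.vi encoding, an unsigned
; x >= imm compare can be emitted as vmsgtu.vi with imm - 1 (valid whenever
; imm != 0), which is the pattern the vmsgtu.vi lines in these vi checks assume.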
1903 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1904 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
1905 ; CHECK: # %bb.0: # %entry
1906 ; CHECK-NEXT: vmv1r.v v10, v0
1907 ; CHECK-NEXT: vmv1r.v v0, v9
1908 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
1909 ; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t
1910 ; CHECK-NEXT: vmv1r.v v0, v10
1913 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
1914 <vscale x 1 x i1> %0,
1915 <vscale x 1 x i8> %1,
1917 <vscale x 1 x i1> %2,
1920 ret <vscale x 1 x i1> %a
1923 define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
1924 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
1925 ; CHECK: # %bb.0: # %entry
1926 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1927 ; CHECK-NEXT: vmsgtu.vi v0, v8, -14
1930 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
1931 <vscale x 2 x i8> %0,
1935 ret <vscale x 2 x i1> %a
1938 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1939 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
1940 ; CHECK: # %bb.0: # %entry
1941 ; CHECK-NEXT: vmv1r.v v10, v0
1942 ; CHECK-NEXT: vmv1r.v v0, v9
1943 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
1944 ; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t
1945 ; CHECK-NEXT: vmv1r.v v0, v10
1948 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
1949 <vscale x 2 x i1> %0,
1950 <vscale x 2 x i8> %1,
1952 <vscale x 2 x i1> %2,
1955 ret <vscale x 2 x i1> %a
1958 define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
1959 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
1960 ; CHECK: # %bb.0: # %entry
1961 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1962 ; CHECK-NEXT: vmsgtu.vi v0, v8, -12
1965 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
1966 <vscale x 4 x i8> %0,
1970 ret <vscale x 4 x i1> %a
1973 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1974 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
1975 ; CHECK: # %bb.0: # %entry
1976 ; CHECK-NEXT: vmv1r.v v10, v0
1977 ; CHECK-NEXT: vmv1r.v v0, v9
1978 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
1979 ; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t
1980 ; CHECK-NEXT: vmv1r.v v0, v10
1983 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
1984 <vscale x 4 x i1> %0,
1985 <vscale x 4 x i8> %1,
1987 <vscale x 4 x i1> %2,
1990 ret <vscale x 4 x i1> %a
1993 define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
1994 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
1995 ; CHECK: # %bb.0: # %entry
1996 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1997 ; CHECK-NEXT: vmsgtu.vi v0, v8, -10
2000 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
2001 <vscale x 8 x i8> %0,
2005 ret <vscale x 8 x i1> %a
2008 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2009 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
2010 ; CHECK: # %bb.0: # %entry
2011 ; CHECK-NEXT: vmv1r.v v10, v0
2012 ; CHECK-NEXT: vmv1r.v v0, v9
2013 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
2014 ; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t
2015 ; CHECK-NEXT: vmv.v.v v0, v10
2018 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
2019 <vscale x 8 x i1> %0,
2020 <vscale x 8 x i8> %1,
2022 <vscale x 8 x i1> %2,
2025 ret <vscale x 8 x i1> %a
2028 define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2029 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
2030 ; CHECK: # %bb.0: # %entry
2031 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2032 ; CHECK-NEXT: vmsgtu.vi v0, v8, -8
2035 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
2036 <vscale x 16 x i8> %0,
2040 ret <vscale x 16 x i1> %a
2043 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2044 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
2045 ; CHECK: # %bb.0: # %entry
2046 ; CHECK-NEXT: vmv1r.v v11, v0
2047 ; CHECK-NEXT: vmv1r.v v0, v10
2048 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2049 ; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t
2050 ; CHECK-NEXT: vmv1r.v v0, v11
2053 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
2054 <vscale x 16 x i1> %0,
2055 <vscale x 16 x i8> %1,
2057 <vscale x 16 x i1> %2,
2060 ret <vscale x 16 x i1> %a
2063 define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2064 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
2065 ; CHECK: # %bb.0: # %entry
2066 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2067 ; CHECK-NEXT: vmsgtu.vi v0, v8, -6
2070 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
2071 <vscale x 32 x i8> %0,
2075 ret <vscale x 32 x i1> %a
2078 define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2079 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
2080 ; CHECK: # %bb.0: # %entry
2081 ; CHECK-NEXT: vmv1r.v v13, v0
2082 ; CHECK-NEXT: vmv1r.v v0, v12
2083 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2084 ; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t
2085 ; CHECK-NEXT: vmv1r.v v0, v13
2088 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
2089 <vscale x 32 x i1> %0,
2090 <vscale x 32 x i8> %1,
2092 <vscale x 32 x i1> %2,
2095 ret <vscale x 32 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, -4
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
<vscale x 1 x i16> %0,
ret <vscale x 1 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %2,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, -2
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
<vscale x 2 x i16> %0,
ret <vscale x 2 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmor.mm v0, v9, v0
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %2,
ret <vscale x 2 x i1> %a
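; The next test uses the same value for the mask and the maskedoff operand, so
; the masked result reduces to (mask & compare) and no copy of the merge
; operand should be needed.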
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
; CHECK: # %bb.0: # %entry
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %0,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmset.m v0
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
<vscale x 4 x i16> %0,
ret <vscale x 4 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %2,
ret <vscale x 4 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 1
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
<vscale x 8 x i16> %0,
ret <vscale x 8 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %2,
ret <vscale x 8 x i1> %a
define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 3
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
<vscale x 16 x i16> %0,
ret <vscale x 16 x i1> %a
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %2,
ret <vscale x 16 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 5
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
<vscale x 1 x i32> %0,
ret <vscale x 1 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %2,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 7
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
<vscale x 2 x i32> %0,
ret <vscale x 2 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %2,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
<vscale x 4 x i32> %0,
ret <vscale x 4 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %2,
ret <vscale x 4 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 11
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
<vscale x 8 x i32> %0,
ret <vscale x 8 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %2,
ret <vscale x 8 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 13
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
<vscale x 1 x i64> %0,
ret <vscale x 1 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 15
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
<vscale x 2 x i64> %0,
ret <vscale x 2 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, -15
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
<vscale x 4 x i64> %0,
ret <vscale x 4 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
ret <vscale x 4 x i1> %a
; Test cases where the mask and maskedoff are the same value.
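; When the maskedoff operand equals the mask, the usual masked-compare merge
; result[i] = mask[i] ? (op[i] >=u x) : maskedoff[i] collapses to
; mask[i] & (op[i] >=u x). A plausible lowering, and the one checked below,
; computes the inverted compare and clears those bits from the mask
; (register names here are placeholders):
;   vmsltu.vx vtmp, vop, xscalar
;   vmandn.mm v0, v0, vtmp
; so no separate destination register tied to the merge operand is needed.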
define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %0,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
<vscale x 2 x i1> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %0,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
<vscale x 4 x i1> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %0,
ret <vscale x 4 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
<vscale x 8 x i1> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %0,
ret <vscale x 8 x i1> %a
define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
<vscale x 16 x i1> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %0,
ret <vscale x 16 x i1> %a
define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
<vscale x 32 x i1> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %0,
ret <vscale x 32 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
<vscale x 1 x i1> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %0,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
<vscale x 2 x i1> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %0,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
<vscale x 4 x i1> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %0,
ret <vscale x 4 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
<vscale x 8 x i1> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %0,
ret <vscale x 8 x i1> %a
define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
<vscale x 16 x i1> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %0,
ret <vscale x 16 x i1> %a
define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
<vscale x 1 x i1> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %0,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
<vscale x 2 x i1> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %0,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v10
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
<vscale x 4 x i1> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %0,
ret <vscale x 4 x i1> %a
define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandn.mm v0, v0, v12
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
<vscale x 8 x i1> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %0,
ret <vscale x 8 x i1> %a
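; For the i64 variants below, RV32 cannot hold the 64-bit scalar in a single
; GPR, so the expected RV32 sequence stores the two halves to the stack,
; splats the value back with a zero-stride vlse64.v, and uses a masked
; vmsleu.vv, while RV64 keeps the vmsltu.vx/vmandn.mm form, as the RV32 and
; RV64 checks show.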
define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmsltu.vx v8, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v8
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %0,
ret <vscale x 1 x i1> %a
define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: vmsleu.vv v10, v12, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmsltu.vx v10, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v10
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %0,
ret <vscale x 2 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmsltu.vx v12, v8, a0
; RV64-NEXT: vmandn.mm v0, v0, v12
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %0,
ret <vscale x 4 x i1> %a