; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
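
; Tests for the llvm.riscv.vmsne ("set mask if not equal") intrinsics in
; unmasked and masked variants: vector-vector (vv), then vector-scalar (vx),
; then vector-immediate (vi), across the supported SEW/LMUL combinations.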
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i8> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i8> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i8> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i8> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmsne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i8> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    vmsne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %4)
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i8> %3,
    <vscale x 32 x i1> %mask,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i16> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i16> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i16> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmsne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i16> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    vmsne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i16> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i32> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i32> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmsne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    vmsne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i32> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i64> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmsne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i64> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsne.vv v0, v8, v12
; CHECK-NEXT:    vmsne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i64> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

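; Vector-scalar (vx) forms. For i64 element types on RV32 the scalar cannot
; fit in one GPR, so it is splatted through the stack with vlse64.v and the
; compare lowers to vmsne.vv instead of vmsne.vx.
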
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsne.vv v0, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmsne.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v11, (a0), zero
; RV32-NEXT:    vmv1r.v v10, v0
; RV32-NEXT:    vmv1r.v v0, v9
; RV32-NEXT:    vmsne.vv v10, v8, v11, v0.t
; RV32-NEXT:    vmv.v.v v0, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vmv1r.v v10, v0
; RV64-NEXT:    vmv1r.v v0, v9
; RV64-NEXT:    vmsne.vx v10, v8, a0, v0.t
; RV64-NEXT:    vmv.v.v v0, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmsne.vv v0, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmsne.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmv1r.v v11, v0
; RV32-NEXT:    vmv1r.v v0, v10
; RV32-NEXT:    vmsne.vv v11, v8, v12, v0.t
; RV32-NEXT:    vmv1r.v v0, v11
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vmv1r.v v11, v0
; RV64-NEXT:    vmv1r.v v0, v10
; RV64-NEXT:    vmsne.vx v11, v8, a0, v0.t
; RV64-NEXT:    vmv1r.v v0, v11
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmsne.vv v0, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmsne.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmv1r.v v13, v0
; RV32-NEXT:    vmv1r.v v0, v12
; RV32-NEXT:    vmsne.vv v13, v8, v16, v0.t
; RV32-NEXT:    vmv1r.v v0, v13
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vmv1r.v v13, v0
; RV64-NEXT:    vmv1r.v v0, v12
; RV64-NEXT:    vmsne.vx v13, v8, a0, v0.t
; RV64-NEXT:    vmv1r.v v0, v13
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

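; Vector-immediate (vi) forms, reusing the scalar (.i8/.i16/.i32) intrinsic
; declarations above with the constant operand 9.
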
define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 9,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 9,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

2132 define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2133 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16:
2134 ; CHECK: # %bb.0: # %entry
2135 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2136 ; CHECK-NEXT: vmsne.vi v0, v8, 9
2139 %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
2140 <vscale x 4 x i16> %0,
2144 ret <vscale x 4 x i1> %a
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 9,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

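; The same vi patterns repeated for 32-bit elements (e32).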
define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

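; The same vi patterns repeated for 64-bit elements (e64). Because vmsne.vi
; encodes the scalar as a 5-bit immediate rather than a GPR, the i64 tests
; share one set of CHECK lines between RV32 and RV64.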
define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a