; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; FIXME: We use exclusively byte types here because the MVT we use for the
; stores is calculated assuming byte elements. We need to deal with mismatched
; subvector "casts" to make other elements work.
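;
; As a purely illustrative, hypothetical sketch (not an autogenerated check,
; and the function name below is made up): the kind of case the FIXME above
; refers to would be a comparison on non-byte elements whose i1 mask result is
; stored directly, e.g.:
;
;   define void @seteq_vv_v8i16_mask_store(ptr %x, ptr %y, ptr %z) {
;     %a = load <8 x i16>, ptr %x
;     %b = load <8 x i16>, ptr %y
;     %c = icmp eq <8 x i16> %a, %b
;     store <8 x i1> %c, ptr %z
;     ret void
;   }
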
define void @seteq_vv_v16i8(ptr %x, ptr %y) {
; CHECK-LABEL: seteq_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = load <16 x i8>, ptr %y
  %c = icmp eq <16 x i8> %a, %b
  %d = sext <16 x i1> %c to <16 x i8>
  store <16 x i8> %d, ptr %x
  ret void
}

define void @setne_vv_v32i8(ptr %x, ptr %y) {
; CHECK-LABEL: setne_vv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = load <32 x i8>, ptr %y
  %c = icmp ne <32 x i8> %a, %b
  %d = zext <32 x i1> %c to <32 x i8>
  store <32 x i8> %d, ptr %x
  ret void
}

define void @setgt_vv_v64i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setgt_vv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v12, (a1)
; CHECK-NEXT:    vmslt.vv v16, v12, v8
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = load <64 x i8>, ptr %y
  %c = icmp sgt <64 x i8> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

define void @setlt_vv_v128i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setlt_vv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vmslt.vv v24, v8, v16
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = icmp slt <128 x i8> %a, %b
  store <128 x i1> %c, ptr %z
  ret void
}

define void @setge_vv_v8i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setge_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load <8 x i8>, ptr %y
  %c = icmp sge <8 x i8> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

define void @setle_vv_v16i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setle_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsle.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = load <16 x i8>, ptr %y
  %c = icmp sle <16 x i8> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

define void @setugt_vv_v32i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setugt_vv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v12, v10, v8
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = load <32 x i8>, ptr %y
  %c = icmp ugt <32 x i8> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

define void @setult_vv_v64i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setult_vv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v12, (a1)
; CHECK-NEXT:    vmsltu.vv v16, v8, v12
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = load <64 x i8>, ptr %y
  %c = icmp ult <64 x i8> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

define void @setuge_vv_v128i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setuge_vv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vmsleu.vv v24, v16, v8
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = icmp uge <128 x i8> %a, %b
  store <128 x i1> %c, ptr %z
  ret void
}

define void @setule_vv_v8i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setule_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsleu.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load <8 x i8>, ptr %y
  %c = icmp ule <8 x i8> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

define void @seteq_vx_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: seteq_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp eq <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_vx_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setne_vx_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ne <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_vx_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setgt_vx_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_vx_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setlt_vx_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmslt.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp slt <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_vx_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setge_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp sge <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_vx_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setle_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp sle <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_vx_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setugt_vx_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ugt <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_vx_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setult_vx_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsltu.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp ult <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_vx_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setuge_vx_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v16, a1
; CHECK-NEXT:    vmsleu.vv v24, v16, v8
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp uge <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_vx_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setule_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp ule <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

define void @seteq_xv_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: seteq_xv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp eq <16 x i8> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_xv_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setne_xv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ne <32 x i8> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_xv_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setgt_xv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmslt.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_xv_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setlt_xv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp slt <128 x i8> %c, %a
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_xv_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setge_xv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp sge <8 x i8> %c, %a
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_xv_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setle_xv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp sle <16 x i8> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_xv_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setugt_xv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsltu.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ugt <32 x i8> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_xv_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setult_xv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp ult <64 x i8> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_xv_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setuge_xv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp uge <128 x i8> %c, %a
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_xv_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setule_xv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsleu.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp ule <8 x i8> %c, %a
  store <8 x i1> %d, ptr %z
  ret void
}

define void @seteq_vi_v16i8(ptr %x, ptr %z) {
; CHECK-LABEL: seteq_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 0, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp eq <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_vi_v32i8(ptr %x, ptr %z) {
; CHECK-LABEL: setne_vi_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vi v10, v8, 0
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 0, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ne <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_vi_v64i8(ptr %x, ptr %z) {
; CHECK-LABEL: setgt_vi_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v12, v8, 0
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 0, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setgt_vi_v64i8_nonzero(ptr %x, ptr %z) {
; CHECK-LABEL: setgt_vi_v64i8_nonzero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v12, v8, 5
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 5, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_vi_v128i8(ptr %x, ptr %z) {
; CHECK-LABEL: setlt_vi_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vi v16, v8, -1
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 0, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp slt <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_vi_v8i8(ptr %x, ptr %z) {
; CHECK-LABEL: setge_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v8, v8, -1
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 0, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp sge <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_vi_v16i8(ptr %x, ptr %z) {
; CHECK-LABEL: setle_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 0, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp sle <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_vi_v32i8(ptr %x, ptr %z) {
; CHECK-LABEL: setugt_vi_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vi v10, v8, 5
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 5, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ugt <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_vi_v64i8(ptr %x, ptr %z) {
; CHECK-LABEL: setult_vi_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vi v12, v8, 4
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 5, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp ult <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_vi_v128i8(ptr %x, ptr %z) {
; CHECK-LABEL: setuge_vi_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vi v16, v8, 4
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 5, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp uge <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_vi_v8i8(ptr %x, ptr %z) {
; CHECK-LABEL: setule_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vi v8, v8, 5
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 5, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp ule <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

define void @seteq_vv_v8i16(ptr %x, ptr %y) {
; CHECK-LABEL: seteq_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = load <8 x i16>, ptr %y
  %c = icmp eq <8 x i16> %a, %b
  %d = sext <8 x i1> %c to <8 x i16>
  store <8 x i16> %d, ptr %x
  ret void
}

define void @setne_vv_v4i32(ptr %x, ptr %y) {
; CHECK-LABEL: setne_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = load <4 x i32>, ptr %y
  %c = icmp ne <4 x i32> %a, %b
  %d = sext <4 x i1> %c to <4 x i32>
  store <4 x i32> %d, ptr %x
  ret void
}

define void @setgt_vv_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: setgt_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v9, (a1)
; CHECK-NEXT:    vmslt.vv v0, v9, v8
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %b = load <2 x i64>, ptr %y
  %c = icmp sgt <2 x i64> %a, %b
  %d = sext <2 x i1> %c to <2 x i64>
  store <2 x i64> %d, ptr %x
  ret void
}

define void @setlt_vv_v16i16(ptr %x, ptr %y) {
; CHECK-LABEL: setlt_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v10, (a1)
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = load <16 x i16>, ptr %y
  %c = icmp slt <16 x i16> %a, %b
  %d = zext <16 x i1> %c to <16 x i16>
  store <16 x i16> %d, ptr %x
  ret void
}

define void @setugt_vv_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: setugt_vv_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = load <8 x i32>, ptr %y
  %c = icmp ugt <8 x i32> %a, %b
  %d = zext <8 x i1> %c to <8 x i32>
  store <8 x i32> %d, ptr %x
  ret void
}

define void @setult_vv_v4i64(ptr %x, ptr %y) {
; CHECK-LABEL: setult_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i64>, ptr %x
  %b = load <4 x i64>, ptr %y
  %c = icmp ult <4 x i64> %a, %b
  %d = zext <4 x i1> %c to <4 x i64>
  store <4 x i64> %d, ptr %x
  ret void
}