1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
3 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
5 ; FIXME: We use exclusively byte types here because the MVT we use for the
6 ; stores is calculated assuming byte elements. We need to deal with mismatched
7 ; subvector "casts" to make other elements work.
; eq vv + sext: vmseq.vv builds the mask; sext i1->i8 is materialized as
; vmerge.vim selecting -1 (true) over a zero splat, stored back through %x.
9 define void @seteq_vv_v16i8(ptr %x, ptr %y) {
10 ; CHECK-LABEL: seteq_vv_v16i8:
12 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
13 ; CHECK-NEXT: vle8.v v8, (a0)
14 ; CHECK-NEXT: vle8.v v9, (a1)
15 ; CHECK-NEXT: vmseq.vv v0, v8, v9
16 ; CHECK-NEXT: vmv.v.i v8, 0
17 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
18 ; CHECK-NEXT: vse8.v v8, (a0)
20 %a = load <16 x i8>, ptr %x
21 %b = load <16 x i8>, ptr %y
22 %c = icmp eq <16 x i8> %a, %b
23 %d = sext <16 x i1> %c to <16 x i8>
24 store <16 x i8> %d, ptr %x
; ne vv + zext: like the sext case but vmerge.vim selects 1 for true lanes.
; VL=32 does not fit vsetivli's uimm5, hence li a2, 32 + vsetvli.
28 define void @setne_vv_v32i8(ptr %x, ptr %y) {
29 ; CHECK-LABEL: setne_vv_v32i8:
31 ; CHECK-NEXT: li a2, 32
32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
33 ; CHECK-NEXT: vle8.v v8, (a0)
34 ; CHECK-NEXT: vle8.v v10, (a1)
35 ; CHECK-NEXT: vmsne.vv v0, v8, v10
36 ; CHECK-NEXT: vmv.v.i v8, 0
37 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
38 ; CHECK-NEXT: vse8.v v8, (a0)
40 %a = load <32 x i8>, ptr %x
41 %b = load <32 x i8>, ptr %y
42 %c = icmp ne <32 x i8> %a, %b
43 %d = zext <32 x i1> %c to <32 x i8>
44 store <32 x i8> %d, ptr %x
; sgt vv, mask stored directly: there is no vmsgt.vv, so a > b is emitted as
; vmslt.vv with operands swapped (b < a); the i1 vector is stored with vsm.v.
48 define void @setgt_vv_v64i8(ptr %x, ptr %y, ptr %z) {
49 ; CHECK-LABEL: setgt_vv_v64i8:
51 ; CHECK-NEXT: li a3, 64
52 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
53 ; CHECK-NEXT: vle8.v v8, (a0)
54 ; CHECK-NEXT: vle8.v v12, (a1)
55 ; CHECK-NEXT: vmslt.vv v16, v12, v8
56 ; CHECK-NEXT: vsm.v v16, (a2)
58 %a = load <64 x i8>, ptr %x
59 %b = load <64 x i8>, ptr %y
60 %c = icmp sgt <64 x i8> %a, %b
61 store <64 x i1> %c, ptr %z
; slt vv at the maximum fixed-vector size here (128 x i8, LMUL=8):
; direct vmslt.vv, mask result stored to %z.
65 define void @setlt_vv_v128i8(ptr %x, ptr %y, ptr %z) {
66 ; CHECK-LABEL: setlt_vv_v128i8:
68 ; CHECK-NEXT: li a3, 128
69 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
70 ; CHECK-NEXT: vle8.v v8, (a0)
71 ; CHECK-NEXT: vle8.v v16, (a1)
72 ; CHECK-NEXT: vmslt.vv v24, v8, v16
73 ; CHECK-NEXT: vsm.v v24, (a2)
75 %a = load <128 x i8>, ptr %x
76 %b = load <128 x i8>, ptr %y
77 %c = icmp slt <128 x i8> %a, %b
78 store <128 x i1> %c, ptr %z
; sge vv: no vmsge.vv exists, so a >= b is emitted as vmsle.vv b, a (swapped).
82 define void @setge_vv_v8i8(ptr %x, ptr %y, ptr %z) {
83 ; CHECK-LABEL: setge_vv_v8i8:
85 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
86 ; CHECK-NEXT: vle8.v v8, (a0)
87 ; CHECK-NEXT: vle8.v v9, (a1)
88 ; CHECK-NEXT: vmsle.vv v8, v9, v8
89 ; CHECK-NEXT: vsm.v v8, (a2)
91 %a = load <8 x i8>, ptr %x
92 %b = load <8 x i8>, ptr %y
93 %c = icmp sge <8 x i8> %a, %b
94 store <8 x i1> %c, ptr %z
; sle vv: direct vmsle.vv in source order.
98 define void @setle_vv_v16i8(ptr %x, ptr %y, ptr %z) {
99 ; CHECK-LABEL: setle_vv_v16i8:
101 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
102 ; CHECK-NEXT: vle8.v v8, (a0)
103 ; CHECK-NEXT: vle8.v v9, (a1)
104 ; CHECK-NEXT: vmsle.vv v8, v8, v9
105 ; CHECK-NEXT: vsm.v v8, (a2)
107 %a = load <16 x i8>, ptr %x
108 %b = load <16 x i8>, ptr %y
109 %c = icmp sle <16 x i8> %a, %b
110 store <16 x i1> %c, ptr %z
; ugt vv: no vmsgtu.vv, so a >u b becomes vmsltu.vv b, a (swapped).
114 define void @setugt_vv_v32i8(ptr %x, ptr %y, ptr %z) {
115 ; CHECK-LABEL: setugt_vv_v32i8:
117 ; CHECK-NEXT: li a3, 32
118 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
119 ; CHECK-NEXT: vle8.v v8, (a0)
120 ; CHECK-NEXT: vle8.v v10, (a1)
121 ; CHECK-NEXT: vmsltu.vv v12, v10, v8
122 ; CHECK-NEXT: vsm.v v12, (a2)
124 %a = load <32 x i8>, ptr %x
125 %b = load <32 x i8>, ptr %y
126 %c = icmp ugt <32 x i8> %a, %b
127 store <32 x i1> %c, ptr %z
; ult vv: direct vmsltu.vv in source order.
131 define void @setult_vv_v64i8(ptr %x, ptr %y, ptr %z) {
132 ; CHECK-LABEL: setult_vv_v64i8:
134 ; CHECK-NEXT: li a3, 64
135 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
136 ; CHECK-NEXT: vle8.v v8, (a0)
137 ; CHECK-NEXT: vle8.v v12, (a1)
138 ; CHECK-NEXT: vmsltu.vv v16, v8, v12
139 ; CHECK-NEXT: vsm.v v16, (a2)
141 %a = load <64 x i8>, ptr %x
142 %b = load <64 x i8>, ptr %y
143 %c = icmp ult <64 x i8> %a, %b
144 store <64 x i1> %c, ptr %z
; uge vv: no vmsgeu.vv, so a >=u b becomes vmsleu.vv b, a (swapped).
148 define void @setuge_vv_v128i8(ptr %x, ptr %y, ptr %z) {
149 ; CHECK-LABEL: setuge_vv_v128i8:
151 ; CHECK-NEXT: li a3, 128
152 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
153 ; CHECK-NEXT: vle8.v v8, (a0)
154 ; CHECK-NEXT: vle8.v v16, (a1)
155 ; CHECK-NEXT: vmsleu.vv v24, v16, v8
156 ; CHECK-NEXT: vsm.v v24, (a2)
158 %a = load <128 x i8>, ptr %x
159 %b = load <128 x i8>, ptr %y
160 %c = icmp uge <128 x i8> %a, %b
161 store <128 x i1> %c, ptr %z
; ule vv: direct vmsleu.vv in source order.
165 define void @setule_vv_v8i8(ptr %x, ptr %y, ptr %z) {
166 ; CHECK-LABEL: setule_vv_v8i8:
168 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
169 ; CHECK-NEXT: vle8.v v8, (a0)
170 ; CHECK-NEXT: vle8.v v9, (a1)
171 ; CHECK-NEXT: vmsleu.vv v8, v8, v9
172 ; CHECK-NEXT: vsm.v v8, (a2)
174 %a = load <8 x i8>, ptr %x
175 %b = load <8 x i8>, ptr %y
176 %c = icmp ule <8 x i8> %a, %b
177 store <8 x i1> %c, ptr %z
; eq vx: the insertelement+shufflevector splat of %y folds into the scalar
; operand of vmseq.vx instead of materializing a splat vector.
181 define void @seteq_vx_v16i8(ptr %x, i8 %y, ptr %z) {
182 ; CHECK-LABEL: seteq_vx_v16i8:
184 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
185 ; CHECK-NEXT: vle8.v v8, (a0)
186 ; CHECK-NEXT: vmseq.vx v8, v8, a1
187 ; CHECK-NEXT: vsm.v v8, (a2)
189 %a = load <16 x i8>, ptr %x
190 %b = insertelement <16 x i8> poison, i8 %y, i32 0
191 %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
192 %d = icmp eq <16 x i8> %a, %c
193 store <16 x i1> %d, ptr %z
; ne vx: splat folds into vmsne.vx.
197 define void @setne_vx_v32i8(ptr %x, i8 %y, ptr %z) {
198 ; CHECK-LABEL: setne_vx_v32i8:
200 ; CHECK-NEXT: li a3, 32
201 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
202 ; CHECK-NEXT: vle8.v v8, (a0)
203 ; CHECK-NEXT: vmsne.vx v10, v8, a1
204 ; CHECK-NEXT: vsm.v v10, (a2)
206 %a = load <32 x i8>, ptr %x
207 %b = insertelement <32 x i8> poison, i8 %y, i32 0
208 %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
209 %d = icmp ne <32 x i8> %a, %c
210 store <32 x i1> %d, ptr %z
; sgt vx: vmsgt.vx exists (unlike the .vv form), so no operand swap needed.
214 define void @setgt_vx_v64i8(ptr %x, i8 %y, ptr %z) {
215 ; CHECK-LABEL: setgt_vx_v64i8:
217 ; CHECK-NEXT: li a3, 64
218 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
219 ; CHECK-NEXT: vle8.v v8, (a0)
220 ; CHECK-NEXT: vmsgt.vx v12, v8, a1
221 ; CHECK-NEXT: vsm.v v12, (a2)
223 %a = load <64 x i8>, ptr %x
224 %b = insertelement <64 x i8> poison, i8 %y, i32 0
225 %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
226 %d = icmp sgt <64 x i8> %a, %c
227 store <64 x i1> %d, ptr %z
; slt vx: direct vmslt.vx.
231 define void @setlt_vx_v128i8(ptr %x, i8 %y, ptr %z) {
232 ; CHECK-LABEL: setlt_vx_v128i8:
234 ; CHECK-NEXT: li a3, 128
235 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
236 ; CHECK-NEXT: vle8.v v8, (a0)
237 ; CHECK-NEXT: vmslt.vx v16, v8, a1
238 ; CHECK-NEXT: vsm.v v16, (a2)
240 %a = load <128 x i8>, ptr %x
241 %b = insertelement <128 x i8> poison, i8 %y, i32 0
242 %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
243 %d = icmp slt <128 x i8> %a, %c
244 store <128 x i1> %d, ptr %z
; sge vx: there is no vmsge.vx instruction, so the scalar is splatted with
; vmv.v.x and the compare becomes vmsle.vv splat, a.
248 define void @setge_vx_v8i8(ptr %x, i8 %y, ptr %z) {
249 ; CHECK-LABEL: setge_vx_v8i8:
251 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
252 ; CHECK-NEXT: vle8.v v8, (a0)
253 ; CHECK-NEXT: vmv.v.x v9, a1
254 ; CHECK-NEXT: vmsle.vv v8, v9, v8
255 ; CHECK-NEXT: vsm.v v8, (a2)
257 %a = load <8 x i8>, ptr %x
258 %b = insertelement <8 x i8> poison, i8 %y, i32 0
259 %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
260 %d = icmp sge <8 x i8> %a, %c
261 store <8 x i1> %d, ptr %z
; sle vx: direct vmsle.vx.
265 define void @setle_vx_v16i8(ptr %x, i8 %y, ptr %z) {
266 ; CHECK-LABEL: setle_vx_v16i8:
268 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
269 ; CHECK-NEXT: vle8.v v8, (a0)
270 ; CHECK-NEXT: vmsle.vx v8, v8, a1
271 ; CHECK-NEXT: vsm.v v8, (a2)
273 %a = load <16 x i8>, ptr %x
274 %b = insertelement <16 x i8> poison, i8 %y, i32 0
275 %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
276 %d = icmp sle <16 x i8> %a, %c
277 store <16 x i1> %d, ptr %z
; ugt vx: direct vmsgtu.vx.
281 define void @setugt_vx_v32i8(ptr %x, i8 %y, ptr %z) {
282 ; CHECK-LABEL: setugt_vx_v32i8:
284 ; CHECK-NEXT: li a3, 32
285 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
286 ; CHECK-NEXT: vle8.v v8, (a0)
287 ; CHECK-NEXT: vmsgtu.vx v10, v8, a1
288 ; CHECK-NEXT: vsm.v v10, (a2)
290 %a = load <32 x i8>, ptr %x
291 %b = insertelement <32 x i8> poison, i8 %y, i32 0
292 %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
293 %d = icmp ugt <32 x i8> %a, %c
294 store <32 x i1> %d, ptr %z
; ult vx: direct vmsltu.vx.
298 define void @setult_vx_v64i8(ptr %x, i8 %y, ptr %z) {
299 ; CHECK-LABEL: setult_vx_v64i8:
301 ; CHECK-NEXT: li a3, 64
302 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
303 ; CHECK-NEXT: vle8.v v8, (a0)
304 ; CHECK-NEXT: vmsltu.vx v12, v8, a1
305 ; CHECK-NEXT: vsm.v v12, (a2)
307 %a = load <64 x i8>, ptr %x
308 %b = insertelement <64 x i8> poison, i8 %y, i32 0
309 %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
310 %d = icmp ult <64 x i8> %a, %c
311 store <64 x i1> %d, ptr %z
; uge vx: no vmsgeu.vx instruction, so the scalar is splatted with vmv.v.x
; and the compare becomes vmsleu.vv splat, a.
315 define void @setuge_vx_v128i8(ptr %x, i8 %y, ptr %z) {
316 ; CHECK-LABEL: setuge_vx_v128i8:
318 ; CHECK-NEXT: li a3, 128
319 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
320 ; CHECK-NEXT: vle8.v v8, (a0)
321 ; CHECK-NEXT: vmv.v.x v16, a1
322 ; CHECK-NEXT: vmsleu.vv v24, v16, v8
323 ; CHECK-NEXT: vsm.v v24, (a2)
325 %a = load <128 x i8>, ptr %x
326 %b = insertelement <128 x i8> poison, i8 %y, i32 0
327 %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
328 %d = icmp uge <128 x i8> %a, %c
329 store <128 x i1> %d, ptr %z
; ule vx: direct vmsleu.vx.
333 define void @setule_vx_v8i8(ptr %x, i8 %y, ptr %z) {
334 ; CHECK-LABEL: setule_vx_v8i8:
336 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
337 ; CHECK-NEXT: vle8.v v8, (a0)
338 ; CHECK-NEXT: vmsleu.vx v8, v8, a1
339 ; CHECK-NEXT: vsm.v v8, (a2)
341 %a = load <8 x i8>, ptr %x
342 %b = insertelement <8 x i8> poison, i8 %y, i32 0
343 %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
344 %d = icmp ule <8 x i8> %a, %c
345 store <8 x i1> %d, ptr %z
; eq xv (splat on the LHS): eq is commutative, so the same vmseq.vx is used.
349 define void @seteq_xv_v16i8(ptr %x, i8 %y, ptr %z) {
350 ; CHECK-LABEL: seteq_xv_v16i8:
352 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
353 ; CHECK-NEXT: vle8.v v8, (a0)
354 ; CHECK-NEXT: vmseq.vx v8, v8, a1
355 ; CHECK-NEXT: vsm.v v8, (a2)
357 %a = load <16 x i8>, ptr %x
358 %b = insertelement <16 x i8> poison, i8 %y, i32 0
359 %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
360 %d = icmp eq <16 x i8> %c, %a
361 store <16 x i1> %d, ptr %z
; ne xv: commutative, same vmsne.vx as the vx form.
365 define void @setne_xv_v32i8(ptr %x, i8 %y, ptr %z) {
366 ; CHECK-LABEL: setne_xv_v32i8:
368 ; CHECK-NEXT: li a3, 32
369 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
370 ; CHECK-NEXT: vle8.v v8, (a0)
371 ; CHECK-NEXT: vmsne.vx v10, v8, a1
372 ; CHECK-NEXT: vsm.v v10, (a2)
374 %a = load <32 x i8>, ptr %x
375 %b = insertelement <32 x i8> poison, i8 %y, i32 0
376 %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
377 %d = icmp ne <32 x i8> %c, %a
378 store <32 x i1> %d, ptr %z
; sgt xv: splat > a is rewritten as a < splat, giving vmslt.vx.
382 define void @setgt_xv_v64i8(ptr %x, i8 %y, ptr %z) {
383 ; CHECK-LABEL: setgt_xv_v64i8:
385 ; CHECK-NEXT: li a3, 64
386 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
387 ; CHECK-NEXT: vle8.v v8, (a0)
388 ; CHECK-NEXT: vmslt.vx v12, v8, a1
389 ; CHECK-NEXT: vsm.v v12, (a2)
391 %a = load <64 x i8>, ptr %x
392 %b = insertelement <64 x i8> poison, i8 %y, i32 0
393 %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
394 %d = icmp sgt <64 x i8> %c, %a
395 store <64 x i1> %d, ptr %z
; slt xv: splat < a is rewritten as a > splat, giving vmsgt.vx.
399 define void @setlt_xv_v128i8(ptr %x, i8 %y, ptr %z) {
400 ; CHECK-LABEL: setlt_xv_v128i8:
402 ; CHECK-NEXT: li a3, 128
403 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
404 ; CHECK-NEXT: vle8.v v8, (a0)
405 ; CHECK-NEXT: vmsgt.vx v16, v8, a1
406 ; CHECK-NEXT: vsm.v v16, (a2)
408 %a = load <128 x i8>, ptr %x
409 %b = insertelement <128 x i8> poison, i8 %y, i32 0
410 %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
411 %d = icmp slt <128 x i8> %c, %a
412 store <128 x i1> %d, ptr %z
; sge xv: splat >= a is rewritten as a <= splat, giving vmsle.vx.
416 define void @setge_xv_v8i8(ptr %x, i8 %y, ptr %z) {
417 ; CHECK-LABEL: setge_xv_v8i8:
419 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
420 ; CHECK-NEXT: vle8.v v8, (a0)
421 ; CHECK-NEXT: vmsle.vx v8, v8, a1
422 ; CHECK-NEXT: vsm.v v8, (a2)
424 %a = load <8 x i8>, ptr %x
425 %b = insertelement <8 x i8> poison, i8 %y, i32 0
426 %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
427 %d = icmp sge <8 x i8> %c, %a
428 store <8 x i1> %d, ptr %z
; sle xv: splat <= a would need vmsge.vx, which doesn't exist; the splat is
; materialized with vmv.v.x and compared with vmsle.vv splat, a.
432 define void @setle_xv_v16i8(ptr %x, i8 %y, ptr %z) {
433 ; CHECK-LABEL: setle_xv_v16i8:
435 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
436 ; CHECK-NEXT: vle8.v v8, (a0)
437 ; CHECK-NEXT: vmv.v.x v9, a1
438 ; CHECK-NEXT: vmsle.vv v8, v9, v8
439 ; CHECK-NEXT: vsm.v v8, (a2)
441 %a = load <16 x i8>, ptr %x
442 %b = insertelement <16 x i8> poison, i8 %y, i32 0
443 %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
444 %d = icmp sle <16 x i8> %c, %a
445 store <16 x i1> %d, ptr %z
; ugt xv: splat >u a is rewritten as a <u splat, giving vmsltu.vx.
449 define void @setugt_xv_v32i8(ptr %x, i8 %y, ptr %z) {
450 ; CHECK-LABEL: setugt_xv_v32i8:
452 ; CHECK-NEXT: li a3, 32
453 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
454 ; CHECK-NEXT: vle8.v v8, (a0)
455 ; CHECK-NEXT: vmsltu.vx v10, v8, a1
456 ; CHECK-NEXT: vsm.v v10, (a2)
458 %a = load <32 x i8>, ptr %x
459 %b = insertelement <32 x i8> poison, i8 %y, i32 0
460 %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
461 %d = icmp ugt <32 x i8> %c, %a
462 store <32 x i1> %d, ptr %z
; ult xv: splat <u a is rewritten as a >u splat, giving vmsgtu.vx.
466 define void @setult_xv_v64i8(ptr %x, i8 %y, ptr %z) {
467 ; CHECK-LABEL: setult_xv_v64i8:
469 ; CHECK-NEXT: li a3, 64
470 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
471 ; CHECK-NEXT: vle8.v v8, (a0)
472 ; CHECK-NEXT: vmsgtu.vx v12, v8, a1
473 ; CHECK-NEXT: vsm.v v12, (a2)
475 %a = load <64 x i8>, ptr %x
476 %b = insertelement <64 x i8> poison, i8 %y, i32 0
477 %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
478 %d = icmp ult <64 x i8> %c, %a
479 store <64 x i1> %d, ptr %z
; uge xv: splat >=u a is rewritten as a <=u splat, giving vmsleu.vx.
483 define void @setuge_xv_v128i8(ptr %x, i8 %y, ptr %z) {
484 ; CHECK-LABEL: setuge_xv_v128i8:
486 ; CHECK-NEXT: li a3, 128
487 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
488 ; CHECK-NEXT: vle8.v v8, (a0)
489 ; CHECK-NEXT: vmsleu.vx v16, v8, a1
490 ; CHECK-NEXT: vsm.v v16, (a2)
492 %a = load <128 x i8>, ptr %x
493 %b = insertelement <128 x i8> poison, i8 %y, i32 0
494 %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
495 %d = icmp uge <128 x i8> %c, %a
496 store <128 x i1> %d, ptr %z
; ule xv: splat <=u a would need vmsgeu.vx, which doesn't exist; the splat
; is materialized with vmv.v.x and compared with vmsleu.vv splat, a.
500 define void @setule_xv_v8i8(ptr %x, i8 %y, ptr %z) {
501 ; CHECK-LABEL: setule_xv_v8i8:
503 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
504 ; CHECK-NEXT: vle8.v v8, (a0)
505 ; CHECK-NEXT: vmv.v.x v9, a1
506 ; CHECK-NEXT: vmsleu.vv v8, v9, v8
507 ; CHECK-NEXT: vsm.v v8, (a2)
509 %a = load <8 x i8>, ptr %x
510 %b = insertelement <8 x i8> poison, i8 %y, i32 0
511 %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
512 %d = icmp ule <8 x i8> %c, %a
513 store <8 x i1> %d, ptr %z
; eq vi: zero splat folds into the 5-bit immediate of vmseq.vi.
517 define void @seteq_vi_v16i8(ptr %x, ptr %z) {
518 ; CHECK-LABEL: seteq_vi_v16i8:
520 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
521 ; CHECK-NEXT: vle8.v v8, (a0)
522 ; CHECK-NEXT: vmseq.vi v8, v8, 0
523 ; CHECK-NEXT: vsm.v v8, (a1)
525 %a = load <16 x i8>, ptr %x
526 %d = icmp eq <16 x i8> %a, splat (i8 0)
527 store <16 x i1> %d, ptr %z
; ne vi: immediate form vmsne.vi.
531 define void @setne_vi_v32i8(ptr %x, ptr %z) {
532 ; CHECK-LABEL: setne_vi_v32i8:
534 ; CHECK-NEXT: li a2, 32
535 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
536 ; CHECK-NEXT: vle8.v v8, (a0)
537 ; CHECK-NEXT: vmsne.vi v10, v8, 0
538 ; CHECK-NEXT: vsm.v v10, (a1)
540 %a = load <32 x i8>, ptr %x
541 %d = icmp ne <32 x i8> %a, splat (i8 0)
542 store <32 x i1> %d, ptr %z
; sgt vi with zero: immediate form vmsgt.vi.
546 define void @setgt_vi_v64i8(ptr %x, ptr %z) {
547 ; CHECK-LABEL: setgt_vi_v64i8:
549 ; CHECK-NEXT: li a2, 64
550 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
551 ; CHECK-NEXT: vle8.v v8, (a0)
552 ; CHECK-NEXT: vmsgt.vi v12, v8, 0
553 ; CHECK-NEXT: vsm.v v12, (a1)
555 %a = load <64 x i8>, ptr %x
556 %d = icmp sgt <64 x i8> %a, splat (i8 0)
557 store <64 x i1> %d, ptr %z
; sgt vi with a nonzero constant (5) still fits vmsgt.vi's simm5 immediate.
561 define void @setgt_vi_v64i8_nonzero(ptr %x, ptr %z) {
562 ; CHECK-LABEL: setgt_vi_v64i8_nonzero:
564 ; CHECK-NEXT: li a2, 64
565 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
566 ; CHECK-NEXT: vle8.v v8, (a0)
567 ; CHECK-NEXT: vmsgt.vi v12, v8, 5
568 ; CHECK-NEXT: vsm.v v12, (a1)
570 %a = load <64 x i8>, ptr %x
571 %d = icmp sgt <64 x i8> %a, splat (i8 5)
572 store <64 x i1> %d, ptr %z
; slt vi: no vmslt.vi exists, so a < 0 is canonicalized to a <= -1 (vmsle.vi -1).
576 define void @setlt_vi_v128i8(ptr %x, ptr %z) {
577 ; CHECK-LABEL: setlt_vi_v128i8:
579 ; CHECK-NEXT: li a2, 128
580 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
581 ; CHECK-NEXT: vle8.v v8, (a0)
582 ; CHECK-NEXT: vmsle.vi v16, v8, -1
583 ; CHECK-NEXT: vsm.v v16, (a1)
585 %a = load <128 x i8>, ptr %x
586 %d = icmp slt <128 x i8> %a, splat (i8 0)
587 store <128 x i1> %d, ptr %z
; sge vi: no vmsge.vi exists, so a >= 0 is canonicalized to a > -1 (vmsgt.vi -1).
591 define void @setge_vi_v8i8(ptr %x, ptr %z) {
592 ; CHECK-LABEL: setge_vi_v8i8:
594 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
595 ; CHECK-NEXT: vle8.v v8, (a0)
596 ; CHECK-NEXT: vmsgt.vi v8, v8, -1
597 ; CHECK-NEXT: vsm.v v8, (a1)
599 %a = load <8 x i8>, ptr %x
600 %d = icmp sge <8 x i8> %a, splat (i8 0)
601 store <8 x i1> %d, ptr %z
; sle vi: direct immediate form vmsle.vi.
605 define void @setle_vi_v16i8(ptr %x, ptr %z) {
606 ; CHECK-LABEL: setle_vi_v16i8:
608 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
609 ; CHECK-NEXT: vle8.v v8, (a0)
610 ; CHECK-NEXT: vmsle.vi v8, v8, 0
611 ; CHECK-NEXT: vsm.v v8, (a1)
613 %a = load <16 x i8>, ptr %x
614 %d = icmp sle <16 x i8> %a, splat (i8 0)
615 store <16 x i1> %d, ptr %z
; ugt vi: direct immediate form vmsgtu.vi.
619 define void @setugt_vi_v32i8(ptr %x, ptr %z) {
620 ; CHECK-LABEL: setugt_vi_v32i8:
622 ; CHECK-NEXT: li a2, 32
623 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
624 ; CHECK-NEXT: vle8.v v8, (a0)
625 ; CHECK-NEXT: vmsgtu.vi v10, v8, 5
626 ; CHECK-NEXT: vsm.v v10, (a1)
628 %a = load <32 x i8>, ptr %x
629 %d = icmp ugt <32 x i8> %a, splat (i8 5)
630 store <32 x i1> %d, ptr %z
; ult vi: no vmsltu.vi exists, so a <u 5 is canonicalized to a <=u 4 (vmsleu.vi 4).
634 define void @setult_vi_v64i8(ptr %x, ptr %z) {
635 ; CHECK-LABEL: setult_vi_v64i8:
637 ; CHECK-NEXT: li a2, 64
638 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
639 ; CHECK-NEXT: vle8.v v8, (a0)
640 ; CHECK-NEXT: vmsleu.vi v12, v8, 4
641 ; CHECK-NEXT: vsm.v v12, (a1)
643 %a = load <64 x i8>, ptr %x
644 %d = icmp ult <64 x i8> %a, splat (i8 5)
645 store <64 x i1> %d, ptr %z
; uge vi: no vmsgeu.vi exists, so a >=u 5 is canonicalized to a >u 4 (vmsgtu.vi 4).
649 define void @setuge_vi_v128i8(ptr %x, ptr %z) {
650 ; CHECK-LABEL: setuge_vi_v128i8:
652 ; CHECK-NEXT: li a2, 128
653 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
654 ; CHECK-NEXT: vle8.v v8, (a0)
655 ; CHECK-NEXT: vmsgtu.vi v16, v8, 4
656 ; CHECK-NEXT: vsm.v v16, (a1)
658 %a = load <128 x i8>, ptr %x
659 %d = icmp uge <128 x i8> %a, splat (i8 5)
660 store <128 x i1> %d, ptr %z
; ule vi: direct immediate form vmsleu.vi.
664 define void @setule_vi_v8i8(ptr %x, ptr %z) {
665 ; CHECK-LABEL: setule_vi_v8i8:
667 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
668 ; CHECK-NEXT: vle8.v v8, (a0)
669 ; CHECK-NEXT: vmsleu.vi v8, v8, 5
670 ; CHECK-NEXT: vsm.v v8, (a1)
672 %a = load <8 x i8>, ptr %x
673 %d = icmp ule <8 x i8> %a, splat (i8 5)
674 store <8 x i1> %d, ptr %z
; Wider element: eq on <8 x i16> with sext result, e16/m1 config,
; same vmseq + vmerge 0/-1 pattern as the i8 case.
678 define void @seteq_vv_v8i16(ptr %x, ptr %y) {
679 ; CHECK-LABEL: seteq_vv_v8i16:
681 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
682 ; CHECK-NEXT: vle16.v v8, (a0)
683 ; CHECK-NEXT: vle16.v v9, (a1)
684 ; CHECK-NEXT: vmseq.vv v0, v8, v9
685 ; CHECK-NEXT: vmv.v.i v8, 0
686 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
687 ; CHECK-NEXT: vse16.v v8, (a0)
689 %a = load <8 x i16>, ptr %x
690 %b = load <8 x i16>, ptr %y
691 %c = icmp eq <8 x i16> %a, %b
692 %d = sext <8 x i1> %c to <8 x i16>
693 store <8 x i16> %d, ptr %x
; ne on <4 x i32> with sext result, e32/m1 config.
697 define void @setne_vv_v4i32(ptr %x, ptr %y) {
698 ; CHECK-LABEL: setne_vv_v4i32:
700 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
701 ; CHECK-NEXT: vle32.v v8, (a0)
702 ; CHECK-NEXT: vle32.v v9, (a1)
703 ; CHECK-NEXT: vmsne.vv v0, v8, v9
704 ; CHECK-NEXT: vmv.v.i v8, 0
705 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
706 ; CHECK-NEXT: vse32.v v8, (a0)
708 %a = load <4 x i32>, ptr %x
709 %b = load <4 x i32>, ptr %y
710 %c = icmp ne <4 x i32> %a, %b
711 %d = sext <4 x i1> %c to <4 x i32>
712 store <4 x i32> %d, ptr %x
; sgt on <2 x i64> with sext result: vmslt with swapped operands (b < a),
; then vmerge 0/-1 for the sign extension.
716 define void @setgt_vv_v2i64(ptr %x, ptr %y) {
717 ; CHECK-LABEL: setgt_vv_v2i64:
719 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
720 ; CHECK-NEXT: vle64.v v8, (a0)
721 ; CHECK-NEXT: vle64.v v9, (a1)
722 ; CHECK-NEXT: vmslt.vv v0, v9, v8
723 ; CHECK-NEXT: vmv.v.i v8, 0
724 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
725 ; CHECK-NEXT: vse64.v v8, (a0)
727 %a = load <2 x i64>, ptr %x
728 %b = load <2 x i64>, ptr %y
729 %c = icmp sgt <2 x i64> %a, %b
730 %d = sext <2 x i1> %c to <2 x i64>
731 store <2 x i64> %d, ptr %x
; slt on <16 x i16> with zext result: vmerge selects 1 for true lanes.
735 define void @setlt_vv_v16i16(ptr %x, ptr %y) {
736 ; CHECK-LABEL: setlt_vv_v16i16:
738 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
739 ; CHECK-NEXT: vle16.v v8, (a0)
740 ; CHECK-NEXT: vle16.v v10, (a1)
741 ; CHECK-NEXT: vmslt.vv v0, v8, v10
742 ; CHECK-NEXT: vmv.v.i v8, 0
743 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
744 ; CHECK-NEXT: vse16.v v8, (a0)
746 %a = load <16 x i16>, ptr %x
747 %b = load <16 x i16>, ptr %y
748 %c = icmp slt <16 x i16> %a, %b
749 %d = zext <16 x i1> %c to <16 x i16>
750 store <16 x i16> %d, ptr %x
; ugt on <8 x i32> with zext result: vmsltu with swapped operands (b <u a).
754 define void @setugt_vv_v8i32(ptr %x, ptr %y) {
755 ; CHECK-LABEL: setugt_vv_v8i32:
757 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
758 ; CHECK-NEXT: vle32.v v8, (a0)
759 ; CHECK-NEXT: vle32.v v10, (a1)
760 ; CHECK-NEXT: vmsltu.vv v0, v10, v8
761 ; CHECK-NEXT: vmv.v.i v8, 0
762 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
763 ; CHECK-NEXT: vse32.v v8, (a0)
765 %a = load <8 x i32>, ptr %x
766 %b = load <8 x i32>, ptr %y
767 %c = icmp ugt <8 x i32> %a, %b
768 %d = zext <8 x i1> %c to <8 x i32>
769 store <8 x i32> %d, ptr %x
; ult on <4 x i64> with zext result: direct vmsltu.vv in source order.
773 define void @setult_vv_v4i64(ptr %x, ptr %y) {
774 ; CHECK-LABEL: setult_vv_v4i64:
776 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
777 ; CHECK-NEXT: vle64.v v8, (a0)
778 ; CHECK-NEXT: vle64.v v10, (a1)
779 ; CHECK-NEXT: vmsltu.vv v0, v8, v10
780 ; CHECK-NEXT: vmv.v.i v8, 0
781 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
782 ; CHECK-NEXT: vse64.v v8, (a0)
784 %a = load <4 x i64>, ptr %x
785 %b = load <4 x i64>, ptr %y
786 %c = icmp ult <4 x i64> %a, %b
787 %d = zext <4 x i1> %c to <4 x i64>
788 store <4 x i64> %d, ptr %x