; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
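; Check that logical operations (and/or/xor, including negated operands) on i1
; values and on vector-of-i1 compare results are selected into Hexagon
; predicate-register instructions (tstbit/vcmp feeding and/or/xor, mux, mask).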

declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32 immarg) #0

define i1 @f0(i32 %a0, i32 %a1) #1 {
; CHECK-LABEL: f0:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p0 = and(p0,p1)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = and i1 %v1, %v3
  ret i1 %v4
}

define i1 @f1(i32 %a0, i32 %a1) #1 {
; CHECK-LABEL: f1:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p0 = or(p0,p1)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = or i1 %v1, %v3
  ret i1 %v4
}

define i1 @f2(i32 %a0, i32 %a1) #1 {
; CHECK-LABEL: f2:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p0 = xor(p0,p1)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v1, %v3
  ret i1 %v4
}

define i1 @f3(i32 %a0, i32 %a1) #1 {
; CHECK-LABEL: f3:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p0 = and(p0,!p1)
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = and i1 %v1, %v4
  ret i1 %v5
}

define i1 @f4(i32 %a0, i32 %a1) #1 {
; CHECK-LABEL: f4:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p0 = or(p0,!p1)
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = or i1 %v1, %v4
  ret i1 %v5
}

define i1 @f5(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f5:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: p0 = and(p2,and(p0,p1))
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v5 = trunc i32 %v4 to i1
  %v6 = and i1 %v1, %v3
  %v7 = and i1 %v5, %v6
  ret i1 %v7
}

define i1 @f6(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f6:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: p0 = and(p2,or(p0,p1))
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v5 = trunc i32 %v4 to i1
  %v6 = or i1 %v1, %v3
  %v7 = and i1 %v5, %v6
  ret i1 %v7
}

define i1 @f7(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f7:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: p0 = or(p2,and(p0,p1))
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v5 = trunc i32 %v4 to i1
  %v6 = and i1 %v1, %v3
  %v7 = or i1 %v5, %v6
  ret i1 %v7
}

define i1 @f8(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f8:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: p0 = or(p2,or(p0,p1))
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v5 = trunc i32 %v4 to i1
  %v6 = or i1 %v1, %v3
  %v7 = or i1 %v5, %v6
  ret i1 %v7
}

define i1 @f9(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f9:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: r2 = p2
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p2 = tstbit(r2,#0)
; CHECK-NEXT: p0 = and(p2,and(p0,!p1))
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v6 = trunc i32 %v5 to i1
  %v7 = and i1 %v1, %v4
  %v8 = and i1 %v6, %v7
  ret i1 %v8
}

define i1 @f10(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f10:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: r2 = p2
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p2 = tstbit(r2,#0)
; CHECK-NEXT: p0 = and(p2,or(p0,!p1))
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v6 = trunc i32 %v5 to i1
  %v7 = or i1 %v1, %v4
  %v8 = and i1 %v6, %v7
  ret i1 %v8
}

define i1 @f11(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f11:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: r2 = p2
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p2 = tstbit(r2,#0)
; CHECK-NEXT: p0 = or(p2,and(p0,!p1))
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v6 = trunc i32 %v5 to i1
  %v7 = and i1 %v1, %v4
  %v8 = or i1 %v6, %v7
  ret i1 %v8
}

define i1 @f12(i32 %a0, i32 %a1, i32 %a2) #1 {
; CHECK-LABEL: f12:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: p0 = tstbit(r0,#3)
; CHECK-NEXT: p1 = tstbit(r1,#3)
; CHECK-NEXT: p2 = tstbit(r2,#3)
; CHECK-NEXT: r0 = p0
; CHECK-NEXT: r1 = p1
; CHECK-NEXT: r2 = p2
; CHECK-NEXT: p0 = tstbit(r0,#0)
; CHECK-NEXT: p1 = tstbit(r1,#0)
; CHECK-NEXT: p2 = tstbit(r2,#0)
; CHECK-NEXT: p0 = or(p2,or(p0,!p1))
; CHECK-NEXT: r0 = mux(p0,#1,#0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
  %v1 = trunc i32 %v0 to i1
  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
  %v3 = trunc i32 %v2 to i1
  %v4 = xor i1 %v3, true
  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
  %v6 = trunc i32 %v5 to i1
  %v7 = or i1 %v1, %v4
  %v8 = or i1 %v6, %v7
  ret i1 %v8
}

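; The same predicate combinations, applied to <2 x i32> compare results (vcmpw.eq).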
define <2 x i32> @f13(<2 x i32> %a0, <2 x i32> %a1) #1 {
; CHECK-LABEL: f13:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r5:4 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = and <2 x i1> %v0, %v1
  %v3 = sext <2 x i1> %v2 to <2 x i32>
  ret <2 x i32> %v3
}

define <2 x i32> @f14(<2 x i32> %a0, <2 x i32> %a1) #1 {
; CHECK-LABEL: f14:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r5:4 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = or <2 x i1> %v0, %v1
  %v3 = sext <2 x i1> %v2 to <2 x i32>
  ret <2 x i32> %v3
}

define <2 x i32> @f15(<2 x i32> %a0, <2 x i32> %a1) #1 {
; CHECK-LABEL: f15:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r5:4 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = xor(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v0, %v1
  %v3 = sext <2 x i1> %v2 to <2 x i32>
  ret <2 x i32> %v3
}

define <2 x i32> @f16(<2 x i32> %a0, <2 x i32> %a1) #1 {
; CHECK-LABEL: f16:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r5:4 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = and <2 x i1> %v0, %v2
  %v4 = sext <2 x i1> %v3 to <2 x i32>
  ret <2 x i32> %v4
}

define <2 x i32> @f17(<2 x i32> %a0, <2 x i32> %a1) #1 {
; CHECK-LABEL: f17:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r5:4 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = or <2 x i1> %v0, %v2
  %v4 = sext <2 x i1> %v3 to <2 x i32>
  ret <2 x i32> %v4
}

define <2 x i32> @f18(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f18:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v3 = and <2 x i1> %v0, %v1
  %v4 = and <2 x i1> %v2, %v3
  %v5 = sext <2 x i1> %v4 to <2 x i32>
  ret <2 x i32> %v5
}

define <2 x i32> @f19(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f19:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v3 = or <2 x i1> %v0, %v1
  %v4 = and <2 x i1> %v2, %v3
  %v5 = sext <2 x i1> %v4 to <2 x i32>
  ret <2 x i32> %v5
}

define <2 x i32> @f20(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f20:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v3 = and <2 x i1> %v0, %v1
  %v4 = or <2 x i1> %v2, %v3
  %v5 = sext <2 x i1> %v4 to <2 x i32>
  ret <2 x i32> %v5
}

define <2 x i32> @f21(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f21:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v3 = or <2 x i1> %v0, %v1
  %v4 = or <2 x i1> %v2, %v3
  %v5 = sext <2 x i1> %v4 to <2 x i32>
  ret <2 x i32> %v5
}

define <2 x i32> @f22(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f22:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v4 = and <2 x i1> %v0, %v2
  %v5 = and <2 x i1> %v3, %v4
  %v6 = sext <2 x i1> %v5 to <2 x i32>
  ret <2 x i32> %v6
}

define <2 x i32> @f23(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f23:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v4 = or <2 x i1> %v0, %v2
  %v5 = and <2 x i1> %v3, %v4
  %v6 = sext <2 x i1> %v5 to <2 x i32>
  ret <2 x i32> %v6
}

define <2 x i32> @f24(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f24:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v4 = and <2 x i1> %v0, %v2
  %v5 = or <2 x i1> %v3, %v4
  %v6 = sext <2 x i1> %v5 to <2 x i32>
  ret <2 x i32> %v6
}

define <2 x i32> @f25(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
; CHECK-LABEL: f25:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r7:6 = combine(#1,#1)
; CHECK-NEXT: p0 = vcmpw.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpw.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpw.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
  %v4 = or <2 x i1> %v0, %v2
  %v5 = or <2 x i1> %v3, %v4
  %v6 = sext <2 x i1> %v5 to <2 x i32>
  ret <2 x i32> %v6
}

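; The same predicate combinations, applied to <4 x i16> compare results (vcmph.eq).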
define <4 x i16> @f26(<4 x i16> %a0, <4 x i16> %a1) #1 {
; CHECK-LABEL: f26:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##65537
; CHECK-NEXT: r5 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = and <4 x i1> %v0, %v1
  %v3 = sext <4 x i1> %v2 to <4 x i16>
  ret <4 x i16> %v3
}

define <4 x i16> @f27(<4 x i16> %a0, <4 x i16> %a1) #1 {
; CHECK-LABEL: f27:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##65537
; CHECK-NEXT: r5 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = or <4 x i1> %v0, %v1
  %v3 = sext <4 x i1> %v2 to <4 x i16>
  ret <4 x i16> %v3
}

define <4 x i16> @f28(<4 x i16> %a0, <4 x i16> %a1) #1 {
; CHECK-LABEL: f28:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##65537
; CHECK-NEXT: r5 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = xor(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v0, %v1
  %v3 = sext <4 x i1> %v2 to <4 x i16>
  ret <4 x i16> %v3
}

define <4 x i16> @f29(<4 x i16> %a0, <4 x i16> %a1) #1 {
; CHECK-LABEL: f29:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##65537
; CHECK-NEXT: r5 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = and <4 x i1> %v0, %v2
  %v4 = sext <4 x i1> %v3 to <4 x i16>
  ret <4 x i16> %v4
}

define <4 x i16> @f30(<4 x i16> %a0, <4 x i16> %a1) #1 {
; CHECK-LABEL: f30:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##65537
; CHECK-NEXT: r5 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = or <4 x i1> %v0, %v2
  %v4 = sext <4 x i1> %v3 to <4 x i16>
  ret <4 x i16> %v4
}

define <4 x i16> @f31(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f31:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v3 = and <4 x i1> %v0, %v1
  %v4 = and <4 x i1> %v2, %v3
  %v5 = sext <4 x i1> %v4 to <4 x i16>
  ret <4 x i16> %v5
}

define <4 x i16> @f32(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f32:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v3 = or <4 x i1> %v0, %v1
  %v4 = and <4 x i1> %v2, %v3
  %v5 = sext <4 x i1> %v4 to <4 x i16>
  ret <4 x i16> %v5
}

define <4 x i16> @f33(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f33:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v3 = and <4 x i1> %v0, %v1
  %v4 = or <4 x i1> %v2, %v3
  %v5 = sext <4 x i1> %v4 to <4 x i16>
  ret <4 x i16> %v5
}

define <4 x i16> @f34(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f34:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v3 = or <4 x i1> %v0, %v1
  %v4 = or <4 x i1> %v2, %v3
  %v5 = sext <4 x i1> %v4 to <4 x i16>
  ret <4 x i16> %v5
}

define <4 x i16> @f35(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f35:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v4 = and <4 x i1> %v0, %v2
  %v5 = and <4 x i1> %v3, %v4
  %v6 = sext <4 x i1> %v5 to <4 x i16>
  ret <4 x i16> %v6
}

define <4 x i16> @f36(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f36:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v4 = or <4 x i1> %v0, %v2
  %v5 = and <4 x i1> %v3, %v4
  %v6 = sext <4 x i1> %v5 to <4 x i16>
  ret <4 x i16> %v6
}

define <4 x i16> @f37(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f37:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v4 = and <4 x i1> %v0, %v2
  %v5 = or <4 x i1> %v3, %v4
  %v6 = sext <4 x i1> %v5 to <4 x i16>
  ret <4 x i16> %v6
}

define <4 x i16> @f38(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
; CHECK-LABEL: f38:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##65537
; CHECK-NEXT: r7 = ##65537
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmph.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmph.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
  %v4 = or <4 x i1> %v0, %v2
  %v5 = or <4 x i1> %v3, %v4
  %v6 = sext <4 x i1> %v5 to <4 x i16>
  ret <4 x i16> %v6
}

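; The same predicate combinations, applied to <8 x i8> compare results (vcmpb.eq).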
define <8 x i8> @f39(<8 x i8> %a0, <8 x i8> %a1) #1 {
; CHECK-LABEL: f39:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##16843009
; CHECK-NEXT: r5 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = and <8 x i1> %v0, %v1
  %v3 = sext <8 x i1> %v2 to <8 x i8>
  ret <8 x i8> %v3
}

define <8 x i8> @f40(<8 x i8> %a0, <8 x i8> %a1) #1 {
; CHECK-LABEL: f40:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##16843009
; CHECK-NEXT: r5 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = or <8 x i1> %v0, %v1
  %v3 = sext <8 x i1> %v2 to <8 x i8>
  ret <8 x i8> %v3
}

define <8 x i8> @f41(<8 x i8> %a0, <8 x i8> %a1) #1 {
; CHECK-LABEL: f41:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##16843009
; CHECK-NEXT: r5 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = xor(p0,p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v0, %v1
  %v3 = sext <8 x i1> %v2 to <8 x i8>
  ret <8 x i8> %v3
}

define <8 x i8> @f42(<8 x i8> %a0, <8 x i8> %a1) #1 {
; CHECK-LABEL: f42:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##16843009
; CHECK-NEXT: r5 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = and(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = and <8 x i1> %v0, %v2
  %v4 = sext <8 x i1> %v3 to <8 x i8>
  ret <8 x i8> %v4
}

define <8 x i8> @f43(<8 x i8> %a0, <8 x i8> %a1) #1 {
; CHECK-LABEL: f43:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r4 = ##16843009
; CHECK-NEXT: r5 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r5:4)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r5:4)
; CHECK-NEXT: p0 = or(p0,!p1)
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = or <8 x i1> %v0, %v2
  %v4 = sext <8 x i1> %v3 to <8 x i8>
  ret <8 x i8> %v4
}

define <8 x i8> @f44(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f44:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v3 = and <8 x i1> %v0, %v1
  %v4 = and <8 x i1> %v2, %v3
  %v5 = sext <8 x i1> %v4 to <8 x i8>
  ret <8 x i8> %v5
}

define <8 x i8> @f45(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f45:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v3 = or <8 x i1> %v0, %v1
  %v4 = and <8 x i1> %v2, %v3
  %v5 = sext <8 x i1> %v4 to <8 x i8>
  ret <8 x i8> %v5
}

define <8 x i8> @f46(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f46:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v3 = and <8 x i1> %v0, %v1
  %v4 = or <8 x i1> %v2, %v3
  %v5 = sext <8 x i1> %v4 to <8 x i8>
  ret <8 x i8> %v5
}

define <8 x i8> @f47(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f47:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v3 = or <8 x i1> %v0, %v1
  %v4 = or <8 x i1> %v2, %v3
  %v5 = sext <8 x i1> %v4 to <8 x i8>
  ret <8 x i8> %v5
}

define <8 x i8> @f48(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f48:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v4 = and <8 x i1> %v0, %v2
  %v5 = and <8 x i1> %v3, %v4
  %v6 = sext <8 x i1> %v5 to <8 x i8>
  ret <8 x i8> %v6
}

define <8 x i8> @f49(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f49:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = and(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v4 = or <8 x i1> %v0, %v2
  %v5 = and <8 x i1> %v3, %v4
  %v6 = sext <8 x i1> %v5 to <8 x i8>
  ret <8 x i8> %v6
}

define <8 x i8> @f50(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f50:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,and(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v4 = and <8 x i1> %v0, %v2
  %v5 = or <8 x i1> %v3, %v4
  %v6 = sext <8 x i1> %v5 to <8 x i8>
  ret <8 x i8> %v6
}

define <8 x i8> @f51(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
; CHECK-LABEL: f51:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: r6 = ##16843009
; CHECK-NEXT: r7 = ##16843009
; CHECK-NEXT: p0 = vcmpb.eq(r1:0,r7:6)
; CHECK-NEXT: p1 = vcmpb.eq(r3:2,r7:6)
; CHECK-NEXT: p2 = vcmpb.eq(r5:4,r7:6)
; CHECK-NEXT: p0 = or(p2,or(p0,!p1))
; CHECK-NEXT: r1:0 = mask(p0)
; CHECK-NEXT: jumpr r31
b0:
  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %v4 = or <8 x i1> %v0, %v2
  %v5 = or <8 x i1> %v3, %v4
  %v6 = sext <8 x i1> %v5 to <8 x i8>
  ret <8 x i8> %v6
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind "target-features"="-small-data,-packets" }