; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone

declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

; fold (sadd x, 0) -> x
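; x + 0 can never overflow, so both results of the intrinsic are known:
; the value is %a0 and the overflow bit is false, leaving nothing for the
; select to do.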
define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_sadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_sadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}
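
; Vector variant of the same fold: the result is %a0, which is already in
; %xmm0 under the SysV calling convention, so only the return remains.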
define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_sadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (uadd x, 0) -> x
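; Same fold for the unsigned intrinsic: x + 0 never carries, so the select
; again reduces to %a0.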
define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_uadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}
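
; Vector variant: folds to an empty body, as in the signed case.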
define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_uadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (uadd (xor a, -1), 1) -> (usub 0, a) and flip carry
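; (~a) + 1 is the two's complement negation 0 - a. The add carries out
; exactly when a == 0, whereas negl sets CF exactly when a != 0, so the
; rewritten select tests the inverted flag (cmovael, taken when CF is clear).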
define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_uadd_not:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    negl %eax
; SSE-NEXT:    cmovael %esi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_not:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    negl %eax
; AVX-NEXT:    cmovael %esi, %eax
; AVX-NEXT:    retq
  %1 = xor i32 %a0, -1
  %2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 1)
  %3 = extractvalue {i32, i1} %2, 0
  %4 = extractvalue {i32, i1} %2, 1
  %5 = select i1 %4, i32 %a1, i32 %3
  ret i32 %5
}
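
; Vector variant: lanes have no carry flag, so the overflow mask is built
; with an unsigned max against 1 and a compare: max(0 - a, 1) == 0 - a holds
; exactly in the lanes where the negation is nonzero, i.e. where the original
; add did not carry.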
define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_uadd_not:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE-NEXT:    pmaxud %xmm2, %xmm0
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_not:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX-NEXT:    vpmaxud %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = xor <4 x i32> %a0, <i32 -1, i32 -1, i32 -1, i32 -1>
  %2 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
  %3 = extractvalue {<4 x i32>, <4 x i1>} %2, 0
  %4 = extractvalue {<4 x i32>, <4 x i1>} %2, 1
  %5 = select <4 x i1> %4, <4 x i32> %a1, <4 x i32> %3
  ret <4 x i32> %5
}

; if uaddo never overflows, replace with add
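; Both operands are shifted right by 16, so each is at most 2^16 - 1 and
; their sum is at most 2^17 - 2, which fits in 32 bits: the overflow bit is
; known zero and the select is dead.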
define i32 @combine_uadd_no_overflow(i32 %a0, i32 %a1, i32 %a2) {
; SSE-LABEL: combine_uadd_no_overflow:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $edx killed $edx def $rdx
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    shrl $16, %esi
; SSE-NEXT:    shrl $16, %edx
; SSE-NEXT:    leal (%rdx,%rsi), %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_no_overflow:
; AVX:       # %bb.0:
; AVX-NEXT:    # kill: def $edx killed $edx def $rdx
; AVX-NEXT:    # kill: def $esi killed $esi def $rsi
; AVX-NEXT:    shrl $16, %esi
; AVX-NEXT:    shrl $16, %edx
; AVX-NEXT:    leal (%rdx,%rsi), %eax
; AVX-NEXT:    retq
  %1 = lshr i32 %a1, 16
  %2 = lshr i32 %a2, 16
  %3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 %2)
  %4 = extractvalue {i32, i1} %3, 0
  %5 = extractvalue {i32, i1} %3, 1
  %6 = select i1 %5, i32 %a2, i32 %4
  ret i32 %6
}
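
; Vector variant: the same known-bits reasoning applies per lane, so the
; uaddo lowers to two logical shifts and a plain paddd/vpaddd.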
define <4 x i32> @combine_vec_uadd_no_overflow(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE-LABEL: combine_vec_uadd_no_overflow:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    psrld $16, %xmm1
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_no_overflow:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $16, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm2, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %a1, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %a2, <i32 16, i32 16, i32 16, i32 16>
  %3 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> %2)
  %4 = extractvalue {<4 x i32>, <4 x i1>} %3, 0
  %5 = extractvalue {<4 x i32>, <4 x i1>} %3, 1
  %6 = select <4 x i1> %5, <4 x i32> %a2, <4 x i32> %4
  ret <4 x i32> %6
}