; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512

declare i32 @llvm.usub.sat.i32(i32, i32)
declare i64 @llvm.usub.sat.i64(i64, i64)
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)

; fold (usub_sat x, undef) -> 0
define i32 @combine_undef_i32(i32 %a0) {
; CHECK-LABEL: combine_undef_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.usub.sat.i32(i32 %a0, i32 undef)
  ret i32 %res
}

define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
  ret <8 x i16> %res
}

; fold (usub_sat c1, c2) -> c3
define i32 @combine_constfold_i32() {
; CHECK-LABEL: combine_constfold_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.usub.sat.i32(i32 100, i32 4294967295)
  ret i32 %res
}

define <8 x i16> @combine_constfold_v8i16() {
; SSE-LABEL: combine_constfold_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,254,0,65534,0,0,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,254,0,65534,0,0,0]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

; fold (usub_sat x, 0) -> x
define i32 @combine_zero_i32(i32 %a0) {
; CHECK-LABEL: combine_zero_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 0)
  ret i32 %1
}

define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
; CHECK-LABEL: combine_zero_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
  ret <8 x i16> %1
}

; fold (usub_sat x, x) -> 0
define i32 @combine_self_i32(i32 %a0) {
; CHECK-LABEL: combine_self_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 %a0)
  ret i32 %1
}

define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_self_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_self_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a0)
  ret <8 x i16> %1
}

; fold (usub_sat x, y) -> (sub x, y) iff no overflow
define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
; CHECK-LABEL: combine_no_overflow_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    shrl $16, %edi
; CHECK-NEXT:    shrl $16, %esi
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    subl %esi, %edi
; CHECK-NEXT:    cmovael %edi, %eax
; CHECK-NEXT:    retq
  %1 = lshr i32 %a0, 16
  %2 = lshr i32 %a1, 16
  %3 = call i32 @llvm.usub.sat.i32(i32 %1, i32 %2)
  ret i32 %3
}

define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_no_overflow_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $10, %xmm0
; SSE-NEXT:    psrlw $10, %xmm1
; SSE-NEXT:    psubusw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_no_overflow_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %3 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
  ret <8 x i16> %3
}

; FIXME: fold (trunc (usub_sat zext(x), y)) -> usub_sat(x, trunc(umin(y,satlimit)))
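; For the i16 case below, a sketch of the folded form (illustrative only; the
; %ymin/%ytrunc names are hypothetical) would clamp the wider operand to the
; narrow type's saturation limit first, then subtract at the narrow width:
;   %ymin   = call i32 @llvm.umin.i32(i32 %a1, i32 65535)
;   %ytrunc = trunc i32 %ymin to i16
;   %res    = call i16 @llvm.usub.sat.i16(i16 %a0, i16 %ytrunc)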
define i16 @combine_trunc_i32_i16(i16 %a0, i32 %a1) {
; CHECK-LABEL: combine_trunc_i32_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzwl %di, %eax
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    subl %esi, %eax
; CHECK-NEXT:    cmovbl %ecx, %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retq
  %1 = zext i16 %a0 to i32
  %2 = call i32 @llvm.usub.sat.i32(i32 %1, i32 %a1)
  %3 = trunc i32 %2 to i16
  ret i16 %3
}

define <8 x i8> @combine_trunc_v8i16_v8i8(<8 x i8> %a0, <8 x i16> %a1) {
; SSE2-LABEL: combine_trunc_v8i16_v8i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT:    psubusw %xmm1, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_trunc_v8i16_v8i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT:    psubusw %xmm1, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: combine_trunc_v8i16_v8i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE42-NEXT:    psubusw %xmm1, %xmm0
; SSE42-NEXT:    packuswb %xmm0, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: combine_trunc_v8i16_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = zext <8 x i8> %a0 to <8 x i16>
  %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %1, <8 x i16> %a1)
  %3 = trunc <8 x i16> %2 to <8 x i8>
  ret <8 x i8> %3
}

define <8 x i16> @combine_trunc_v8i32_v8i16(<8 x i16> %a0, <8 x i32> %a1) {
; SSE2-LABEL: combine_trunc_v8i32_v8i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm2, %xmm4
; SSE2-NEXT:    pxor %xmm3, %xmm4
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
; SSE2-NEXT:    movdqa %xmm5, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm6
; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
; SSE2-NEXT:    pand %xmm6, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm6
; SSE2-NEXT:    por %xmm2, %xmm6
; SSE2-NEXT:    pslld $16, %xmm6
; SSE2-NEXT:    psrad $16, %xmm6
; SSE2-NEXT:    pxor %xmm1, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
; SSE2-NEXT:    pxor %xmm5, %xmm4
; SSE2-NEXT:    pand %xmm1, %xmm5
; SSE2-NEXT:    por %xmm4, %xmm5
; SSE2-NEXT:    pslld $16, %xmm5
; SSE2-NEXT:    psrad $16, %xmm5
; SSE2-NEXT:    packssdw %xmm6, %xmm5
; SSE2-NEXT:    psubusw %xmm5, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_trunc_v8i32_v8i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; SSE41-NEXT:    pminud %xmm3, %xmm2
; SSE41-NEXT:    pminud %xmm3, %xmm1
; SSE41-NEXT:    packusdw %xmm2, %xmm1
; SSE41-NEXT:    psubusw %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: combine_trunc_v8i32_v8i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; SSE42-NEXT:    pminud %xmm3, %xmm2
; SSE42-NEXT:    pminud %xmm3, %xmm1
; SSE42-NEXT:    packusdw %xmm2, %xmm1
; SSE42-NEXT:    psubusw %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: combine_trunc_v8i32_v8i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [65535,65535,65535,65535]
; AVX1-NEXT:    vpminud %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpminud %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_trunc_v8i32_v8i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT:    vpminud %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_trunc_v8i32_v8i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT:    vpmovusdw %zmm1, %ymm1
; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = zext <8 x i16> %a0 to <8 x i32>
  %2 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %1, <8 x i32> %a1)
  %3 = trunc <8 x i32> %2 to <8 x i16>
  ret <8 x i16> %3
}

; fold (usub_sat (shuffle x, u, m), (shuffle y, u, m)) -> (shuffle (usub_sat x, y), u, m)
define <8 x i16> @combine_shuffle_shuffle_v8i16(<8 x i16> %x0, <8 x i16> %y0) {
; SSE-LABEL: combine_shuffle_shuffle_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_shuffle_shuffle_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; AVX-NEXT:    retq
  %x1 = shufflevector <8 x i16> %x0, <8 x i16> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
  %y1 = shufflevector <8 x i16> %y0, <8 x i16> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
  %res = tail call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x1, <8 x i16> %y1)
  ret <8 x i16> %res
}