; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW

declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)

; fold (uadd_sat x, undef) -> -1
define i32 @combine_undef_i32(i32 %a0) {
; CHECK-LABEL: combine_undef_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 undef)
  ret i32 %res
}

define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
  ret <8 x i16> %res
}

; fold (uadd_sat c1, c2) -> c3
define i32 @combine_constfold_i32() {
; CHECK-LABEL: combine_constfold_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
  ret i32 %res
}

define <8 x i16> @combine_constfold_v8i16() {
; SSE-LABEL: combine_constfold_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

; fold (uadd_sat c, x) -> (uadd_sat x, c)
define i32 @combine_constant_i32(i32 %a0) {
; CHECK-LABEL: combine_constant_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    incl %edi
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    cmovnel %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
  ret i32 %1
}

define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_constant_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constant_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
  ret <8 x i16> %1
}

; fold (uadd_sat x, 0) -> x
define i32 @combine_zero_i32(i32 %a0) {
; CHECK-LABEL: combine_zero_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
  ret i32 %1
}

define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
; CHECK-LABEL: combine_zero_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
  ret <8 x i16> %1
}

; fold (uadd_sat x, y) -> (add x, y) iff no overflow
define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
; CHECK-LABEL: combine_no_overflow_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
; CHECK-NEXT:    shrl $16, %edi
; CHECK-NEXT:    shrl $16, %esi
; CHECK-NEXT:    leal (%rsi,%rdi), %eax
; CHECK-NEXT:    retq
  %1 = lshr i32 %a0, 16
  %2 = lshr i32 %a1, 16
  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
  ret i32 %3
}

define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_no_overflow_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $10, %xmm0
; SSE-NEXT:    psrlw $10, %xmm1
; SSE-NEXT:    paddw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_no_overflow_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
  ret <8 x i16> %3
}