; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=CHECK,X64

declare i4 @llvm.uadd.sat.i4(i4, i4)
declare i8 @llvm.uadd.sat.i8(i8, i8)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)

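; uadd.sat(x, y) returns x + y clamped to the unsigned maximum instead of
; wrapping on overflow.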
define i32 @func(i32 %x, i32 %y) nounwind {
; X86-LABEL: func:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-1, %eax
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func:
; X64:       # %bb.0:
; X64-NEXT:    addl %esi, %edi
; X64-NEXT:    movl $-1, %eax
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    retq
  %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i64 @func2(i64 %x, i64 %y) nounwind {
; X86-LABEL: func2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovbl %ecx, %edx
; X86-NEXT:    cmovbl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func2:
; X64:       # %bb.0:
; X64-NEXT:    addq %rsi, %rdi
; X64-NEXT:    movq $-1, %rax
; X64-NEXT:    cmovaeq %rdi, %rax
; X64-NEXT:    retq
  %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y)
  ret i64 %tmp
}

define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
; X86-LABEL: func16:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addw {{[0-9]+}}(%esp), %cx
; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: func16:
; X64:       # %bb.0:
; X64-NEXT:    addw %si, %di
; X64-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X64-NEXT:    cmovael %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y)
  ret i16 %tmp
}

define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
; X86-LABEL: func8:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    movl $255, %eax
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: func8:
; X64:       # %bb.0:
; X64-NEXT:    addb %sil, %dil
; X64-NEXT:    movzbl %dil, %ecx
; X64-NEXT:    movl $255, %eax
; X64-NEXT:    cmovael %ecx, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y)
  ret i8 %tmp
}

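; The i4 case cannot rely on the carry flag of the 8-bit add, so the saturated
; value is selected with an explicit compare against the 4-bit maximum (15).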
define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
; X86-LABEL: func3:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    cmpb $15, %al
; X86-NEXT:    movl $15, %eax
; X86-NEXT:    cmovbl %ecx, %eax
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    retl
;
; X64-LABEL: func3:
; X64:       # %bb.0:
; X64-NEXT:    addb %sil, %dil
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    cmpb $15, %al
; X64-NEXT:    movl $15, %ecx
; X64-NEXT:    cmovbl %eax, %ecx
; X64-NEXT:    movzbl %cl, %eax
; X64-NEXT:    retq
  %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y)
  ret i4 %tmp
}

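; On X64 the vector case lowers to SSE2: paddd computes the wrapping sum, the
; two pxor instructions bias x and x+y by 0x80000000 so that the signed
; pcmpgtd acts as an unsigned x > x+y overflow test, and por forces the
; overflowed lanes to all-ones.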
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-LABEL: vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl $-1, %ebx
; X86-NEXT:    cmovbl %ebx, %edi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    cmovbl %ebx, %esi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    cmovbl %ebx, %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmovbl %ebx, %ecx
; X86-NEXT:    movl %ecx, 12(%eax)
; X86-NEXT:    movl %edx, 8(%eax)
; X86-NEXT:    movl %esi, 4(%eax)
; X86-NEXT:    movl %edi, (%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl $4
;
; X64-LABEL: vec:
; X64:       # %bb.0:
; X64-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-NEXT:    paddd %xmm0, %xmm1
; X64-NEXT:    pxor %xmm2, %xmm0
; X64-NEXT:    pxor %xmm1, %xmm2
; X64-NEXT:    pcmpgtd %xmm2, %xmm0
; X64-NEXT:    por %xmm1, %xmm0
; X64-NEXT:    retq
  %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}