; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=X64
;
; fixed avg(x,y) = sub(or(x,y),ashr(xor(x,y),1))
;
; ext avg(x,y) = trunc(ashr(add(sext(x),sext(y),1),1))
;
define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
; X86-LABEL: test_fixed_i8:
; X86:       # %bb.0:
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    leal 1(%ecx,%eax), %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i8:
; X64:       # %bb.0:
; X64-NEXT:    movsbl %sil, %eax
; X64-NEXT:    movsbl %dil, %ecx
; X64-NEXT:    leal 1(%rcx,%rax), %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %or = or i8 %a0, %a1
  %xor = xor i8 %a0, %a1
  %shift = ashr i8 %xor, 1
  %res = sub i8 %or, %shift
  ret i8 %res
}
define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
; X86-LABEL: test_ext_i8:
; X86:       # %bb.0:
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    leal 1(%ecx,%eax), %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i8:
; X64:       # %bb.0:
; X64-NEXT:    movsbl %sil, %eax
; X64-NEXT:    movsbl %dil, %ecx
; X64-NEXT:    leal 1(%rcx,%rax), %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %x0 = sext i8 %a0 to i16
  %x1 = sext i8 %a1 to i16
  %sum = add i16 %x0, %x1
  %sum1 = add i16 %sum, 1
  %shift = ashr i16 %sum1, 1
  %res = trunc i16 %shift to i8
  ret i8 %res
}
define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
; X86-LABEL: test_fixed_i16:
; X86:       # %bb.0:
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    leal 1(%ecx,%eax), %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i16:
; X64:       # %bb.0:
; X64-NEXT:    movswl %si, %eax
; X64-NEXT:    movswl %di, %ecx
; X64-NEXT:    leal 1(%rcx,%rax), %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %or = or i16 %a0, %a1
  %xor = xor i16 %a0, %a1
  %shift = ashr i16 %xor, 1
  %res = sub i16 %or, %shift
  ret i16 %res
}
define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
; X86-LABEL: test_ext_i16:
; X86:       # %bb.0:
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    leal 1(%ecx,%eax), %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i16:
; X64:       # %bb.0:
; X64-NEXT:    movswl %si, %eax
; X64-NEXT:    movswl %di, %ecx
; X64-NEXT:    leal 1(%rcx,%rax), %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %x0 = sext i16 %a0 to i32
  %x1 = sext i16 %a1 to i32
  %sum = add i32 %x0, %x1
  %sum1 = add i32 %sum, 1
  %shift = ashr i32 %sum1, 1
  %res = trunc i32 %shift to i16
  ret i16 %res
}
define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_fixed_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    xorl %ecx, %edx
; X86-NEXT:    sarl %edx
; X86-NEXT:    subl %edx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i32:
; X64:       # %bb.0:
; X64-NEXT:    movslq %esi, %rax
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    leaq 1(%rcx,%rax), %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %or = or i32 %a0, %a1
  %xor = xor i32 %a1, %a0
  %shift = ashr i32 %xor, 1
  %res = sub i32 %or, %shift
  ret i32 %res
}
define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_ext_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    xorl %ecx, %edx
; X86-NEXT:    sarl %edx
; X86-NEXT:    subl %edx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i32:
; X64:       # %bb.0:
; X64-NEXT:    movslq %esi, %rax
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    leaq 1(%rcx,%rax), %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %x0 = sext i32 %a0 to i64
  %x1 = sext i32 %a1 to i64
  %sum = add i64 %x0, %x1
  %sum1 = add i64 %sum, 1
  %shift = ashr i64 %sum1, 1
  %res = trunc i64 %shift to i32
  ret i32 %res
}
define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
; X86-LABEL: test_fixed_i64:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    xorl %ecx, %edi
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    xorl %esi, %ebx
; X86-NEXT:    shrdl $1, %ebx, %edi
; X86-NEXT:    orl %esi, %edx
; X86-NEXT:    sarl %ebx
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    subl %edi, %eax
; X86-NEXT:    sbbl %ebx, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: test_fixed_i64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rsi, %rax
; X64-NEXT:    xorq %rsi, %rdi
; X64-NEXT:    sarq %rdi
; X64-NEXT:    subq %rdi, %rax
; X64-NEXT:    retq
  %or = or i64 %a0, %a1
  %xor = xor i64 %a1, %a0
  %shift = ashr i64 %xor, 1
  %res = sub i64 %or, %shift
  ret i64 %res
}
define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
; X86-LABEL: test_ext_i64:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    xorl %ecx, %edi
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    xorl %esi, %ebx
; X86-NEXT:    shrdl $1, %ebx, %edi
; X86-NEXT:    orl %esi, %edx
; X86-NEXT:    sarl %ebx
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    subl %edi, %eax
; X86-NEXT:    sbbl %ebx, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: test_ext_i64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rsi, %rax
; X64-NEXT:    xorq %rsi, %rdi
; X64-NEXT:    sarq %rdi
; X64-NEXT:    subq %rdi, %rax
; X64-NEXT:    retq
  %x0 = sext i64 %a0 to i128
  %x1 = sext i64 %a1 to i128
  %sum = add i128 %x0, %x1
  %sum1 = add i128 %sum, 1
  %shift = ashr i128 %sum1, 1
  %res = trunc i128 %shift to i64
  ret i64 %res
}