; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

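; The 8-bit udivs by constants are lowered to multiply+shift sequences; both results
; still fit in a byte, so the zero-extension performed in register (movzbl) keeps
; their known bits through the vector srem in the nested loops below.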
define void @knownbits_zext_in_reg(i8*) nounwind {
; X32-LABEL: knownbits_zext_in_reg:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movzbl (%eax), %ecx
; X32-NEXT:    imull $101, %ecx, %eax
; X32-NEXT:    shrl $14, %eax
; X32-NEXT:    imull $177, %ecx, %edx
; X32-NEXT:    shrl $14, %edx
; X32-NEXT:    movzbl %al, %ecx
; X32-NEXT:    xorl %ebx, %ebx
; X32-NEXT:    .p2align 4, 0x90
; X32-NEXT:  .LBB0_1: # %CF
; X32-NEXT:    # =>This Loop Header: Depth=1
; X32-NEXT:    # Child Loop BB0_2 Depth 2
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    .p2align 4, 0x90
; X32-NEXT:  .LBB0_2: # %CF237
; X32-NEXT:    # Parent Loop BB0_1 Depth=1
; X32-NEXT:    # => This Inner Loop Header: Depth=2
; X32-NEXT:    testb %bl, %bl
; X32-NEXT:    jne .LBB0_2
; X32-NEXT:    jmp .LBB0_1
; X64-LABEL: knownbits_zext_in_reg:
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    imull $101, %eax, %ecx
; X64-NEXT:    shrl $14, %ecx
; X64-NEXT:    imull $177, %eax, %edx
; X64-NEXT:    shrl $14, %edx
; X64-NEXT:    movzbl %cl, %ecx
; X64-NEXT:    xorl %esi, %esi
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB0_1: # %CF
; X64-NEXT:    # =>This Loop Header: Depth=1
; X64-NEXT:    # Child Loop BB0_2 Depth 2
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB0_2: # %CF237
; X64-NEXT:    # Parent Loop BB0_1 Depth=1
; X64-NEXT:    # => This Inner Loop Header: Depth=2
; X64-NEXT:    testb %sil, %sil
; X64-NEXT:    jne .LBB0_2
; X64-NEXT:    jmp .LBB0_1
BB:
  %L5 = load i8, i8* %0
  %Sl9 = select i1 true, i8 %L5, i8 undef
  %B21 = udiv i8 %Sl9, -93
  %B22 = udiv i8 %Sl9, 93
  br label %CF

CF:                                               ; preds = %CF246, %BB
  %I40 = insertelement <4 x i8> zeroinitializer, i8 %B21, i32 1
  %I41 = insertelement <4 x i8> zeroinitializer, i8 %B22, i32 1
  %B41 = srem <4 x i8> %I40, %I41
  br label %CF237

CF237:                                            ; preds = %CF237, %CF
  %Cmp73 = icmp ne i1 undef, undef
  br i1 %Cmp73, label %CF237, label %CF246

CF246:                                            ; preds = %CF237
  %Cmp117 = icmp ult <4 x i8> %B41, undef
  %E156 = extractelement <4 x i1> %Cmp117, i32 2
  br label %CF
}
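
; Both operands are masked down to 15 bits, so their sum stays below 2^16 and the
; high bits extracted by the shift are known zero; both targets simply return 0.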
define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
; X32-LABEL: knownbits_mask_add_lshr:
; X32-NEXT:    xorl %eax, %eax
; X64-LABEL: knownbits_mask_add_lshr:
; X64-NEXT:    xorl %eax, %eax
  %1 = and i32 %a0, 32767
  %2 = and i32 %a1, 32766
  %3 = add i32 %1, %2
  %4 = lshr i32 %3, 16
  ret i32 %4
}
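
; Both addends have their low 10 bits cleared by the -1024 mask, so after the
; 54-bit shift the low 64 bits of the i128 result are known zero and only the
; high half needs to be computed.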
define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
; X32-LABEL: knownbits_mask_addc_shl:
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl $-1024, %esi # imm = 0xFC00
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    andl %esi, %edi
; X32-NEXT:    andl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    addl %edi, %esi
; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    adcl $0, %ecx
; X32-NEXT:    shldl $22, %edx, %ecx
; X32-NEXT:    shldl $22, %esi, %edx
; X32-NEXT:    movl %edx, 8(%eax)
; X32-NEXT:    movl %ecx, 12(%eax)
; X32-NEXT:    movl $0, 4(%eax)
; X32-NEXT:    movl $0, (%eax)
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X64-LABEL: knownbits_mask_addc_shl:
; X64-NEXT:    andq $-1024, %rdi # imm = 0xFC00
; X64-NEXT:    andq $-1024, %rsi # imm = 0xFC00
; X64-NEXT:    addq %rdi, %rsi
; X64-NEXT:    adcl $0, %edx
; X64-NEXT:    shldq $54, %rsi, %rdx
; X64-NEXT:    xorl %eax, %eax
  %1 = and i64 %a0, -1024
  %2 = zext i64 %1 to i128
  %3 = and i64 %a1, -1024
  %4 = zext i64 %3 to i128
  %5 = add i128 %2, %4
  %6 = zext i64 %a2 to i128
  %7 = shl i128 %6, 64
  %8 = add i128 %5, %7
  %9 = shl i128 %8, 54
  ret i128 %9
}
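
; Both inputs have their low 32 bits shifted out, so the truncated sum is known to
; be zero; only the unsigned/signed overflow flags of the 64-bit add are of interest.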
define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: knownbits_uaddo_saddo:
; X32-NEXT:    pushl %ebx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl %ecx, %edx
; X32-NEXT:    addl %eax, %edx
; X32-NEXT:    testl %eax, %eax
; X32-NEXT:    setns %al
; X32-NEXT:    testl %ecx, %ecx
; X32-NEXT:    setns %cl
; X32-NEXT:    cmpb %al, %cl
; X32-NEXT:    testl %edx, %edx
; X32-NEXT:    setns %dl
; X32-NEXT:    cmpb %dl, %cl
; X32-NEXT:    setne %dl
; X32-NEXT:    andb %al, %dl
; X32-NEXT:    orb %bl, %dl
; X32-NEXT:    xorl %eax, %eax
; X32-NEXT:    popl %ebx
; X64-LABEL: knownbits_uaddo_saddo:
; X64-NEXT:    shlq $32, %rdi
; X64-NEXT:    shlq $32, %rsi
; X64-NEXT:    addq %rdi, %rsi
; X64-NEXT:    orb %al, %dl
; X64-NEXT:    xorl %eax, %eax
  %1 = shl i64 %a0, 32
  %2 = shl i64 %a1, 32
  %u = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %1, i64 %2)
  %uval = extractvalue {i64, i1} %u, 0
  %uovf = extractvalue {i64, i1} %u, 1
  %s = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %1, i64 %2)
  %sval = extractvalue {i64, i1} %s, 0
  %sovf = extractvalue {i64, i1} %s, 1
  %sum = add i64 %uval, %sval
  %3 = trunc i64 %sum to i32
  %4 = or i1 %uovf, %sovf
  %ret0 = insertvalue {i32, i1} undef, i32 %3, 0
  %ret1 = insertvalue {i32, i1} %ret0, i1 %4, 1
  ret {i32, i1} %ret1
}
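
; Same idea for subtraction: the low 32 bits of the difference are known zero, so
; only the borrow/overflow flags matter and a compare suffices instead of a subtract.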
define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: knownbits_usubo_ssubo:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    cmpl %eax, %ecx
; X32-NEXT:    setns %dl
; X32-NEXT:    testl %ecx, %ecx
; X32-NEXT:    setns %cl
; X32-NEXT:    cmpb %dl, %cl
; X32-NEXT:    setne %ch
; X32-NEXT:    testl %eax, %eax
; X32-NEXT:    setns %al
; X32-NEXT:    cmpb %al, %cl
; X32-NEXT:    setne %dl
; X32-NEXT:    andb %ch, %dl
; X32-NEXT:    orb %dh, %dl
; X32-NEXT:    xorl %eax, %eax
; X64-LABEL: knownbits_usubo_ssubo:
; X64-NEXT:    shlq $32, %rdi
; X64-NEXT:    shlq $32, %rsi
; X64-NEXT:    cmpq %rsi, %rdi
; X64-NEXT:    orb %al, %dl
; X64-NEXT:    xorl %eax, %eax
  %1 = shl i64 %a0, 32
  %2 = shl i64 %a1, 32
  %u = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %1, i64 %2)
  %uval = extractvalue {i64, i1} %u, 0
  %uovf = extractvalue {i64, i1} %u, 1
  %s = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %1, i64 %2)
  %sval = extractvalue {i64, i1} %s, 0
  %sovf = extractvalue {i64, i1} %s, 1
  %sum = add i64 %uval, %sval
  %3 = trunc i64 %sum to i32
  %4 = or i1 %uovf, %sovf
  %ret0 = insertvalue {i32, i1} undef, i32 %3, 0
  %ret1 = insertvalue {i32, i1} %ret0, i1 %4, 1
  ret {i32, i1} %ret1
}

declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
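
; fshl with an all-ones second operand and a shift of 5 makes the low 5 bits of the
; result known ones, so the masked result folds to the constant 3 on both targets.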
define i32 @knownbits_fshl(i32 %a0) nounwind {
; X32-LABEL: knownbits_fshl:
; X32-NEXT:    movl $3, %eax
; X64-LABEL: knownbits_fshl:
; X64-NEXT:    movl $3, %eax
  %1 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 -1, i32 5)
  %2 = and i32 %1, 3
  ret i32 %2
}
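
; fshr with an all-ones second operand and a shift of 5 makes the low 27 bits of the
; result known ones, so the same mask again folds to the constant 3.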
define i32 @knownbits_fshr(i32 %a0) nounwind {
; X32-LABEL: knownbits_fshr:
; X32-NEXT:    movl $3, %eax
; X64-LABEL: knownbits_fshr:
; X64-NEXT:    movl $3, %eax
  %1 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 -1, i32 5)
  %2 = and i32 %1, 3
  ret i32 %2
}

declare i32 @llvm.fshl.i32(i32, i32, i32) nounwind readnone
declare i32 @llvm.fshr.i32(i32, i32, i32) nounwind readnone