; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V0,X86-V0
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V1,X86-V1
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI2,X86-BMI2,V2,X86-V2
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,BMI2,X86-BMI2,AVX2,X86-AVX2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V0,X64-V0
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V1,X64-V1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI2,X64-BMI2,V2,X64-V2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,BMI2,X64-BMI2,AVX2,X64-AVX2
; We are looking for the following pattern here:
;   (X & (C << Y)) ==/!= 0
; It may be optimal to hoist the constant:
;   ((X l>> Y) & C) ==/!= 0

;------------------------------------------------------------------------------;
; A few scalar tests
;------------------------------------------------------------------------------;
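
; As a rough illustration of the transform described above (not part of the
; autogenerated checks; the function names and the 0xFFFF00 constant are made
; up for the example), the two forms of the i32 case would look like this:
;
;   define i1 @example_before_hoist(i32 %x, i32 %y) {
;     %mask = shl i32 16776960, %y          ; C << Y
;     %masked = and i32 %mask, %x           ; X & (C << Y)
;     %res = icmp eq i32 %masked, 0
;     ret i1 %res
;   }
;
;   define i1 @example_after_hoist(i32 %x, i32 %y) {
;     %shifted = lshr i32 %x, %y            ; X l>> Y
;     %masked = and i32 %shifted, 16776960  ; (X l>> Y) & C
;     %res = icmp eq i32 %masked, 0
;     ret i1 %res
;   }
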
define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movb $-128, %al
; X86-NEXT: shlb %cl, %al
; X86-NEXT: testb %al, {{[0-9]+}}(%esp)
; X64-LABEL: scalar_i8_signbit_eq:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movb $-128, %al
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlb %cl, %al
; X64-NEXT: testb %dil, %al
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_lowestbit_eq:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %eax, %ecx
; X64-LABEL: scalar_i8_lowestbit_eq:
; X64-NEXT: btl %esi, %edi
  %t0 = shl i8 1, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_bitsinmiddle_eq:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movb $24, %al
; X86-NEXT: shlb %cl, %al
; X86-NEXT: testb %al, {{[0-9]+}}(%esp)
; X64-LABEL: scalar_i8_bitsinmiddle_eq:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movb $24, %al
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlb %cl, %al
; X64-NEXT: testb %dil, %al
  %t0 = shl i8 24, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_signbit_eq:
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $-32768, %eax # imm = 0x8000
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testw %ax, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i16_signbit_eq:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $-32768, %ecx # imm = 0x8000
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: testw %ax, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i16_signbit_eq:
; X64-BMI1-NEXT: movl %esi, %ecx
; X64-BMI1-NEXT: movl $-32768, %eax # imm = 0x8000
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shll %cl, %eax
; X64-BMI1-NEXT: testw %di, %ax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i16_signbit_eq:
; X64-BMI2-NEXT: movl $-32768, %eax # imm = 0x8000
; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
; X64-BMI2-NEXT: testw %di, %ax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i16 32768, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
; X86-LABEL: scalar_i16_lowestbit_eq:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %eax, %ecx
; X86-NEXT: setae %al
; X64-LABEL: scalar_i16_lowestbit_eq:
; X64-NEXT: btl %esi, %edi
; X64-NEXT: setae %al
  %t0 = shl i16 1, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $4080, %eax # imm = 0xFF0
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testw %ax, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $4080, %ecx # imm = 0xFF0
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: testw %ax, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI1-NEXT: movl %esi, %ecx
; X64-BMI1-NEXT: movl $4080, %eax # imm = 0xFF0
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shll %cl, %eax
; X64-BMI1-NEXT: testw %di, %ax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI2-NEXT: movl $4080, %eax # imm = 0xFF0
; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
; X64-BMI2-NEXT: testw %di, %ax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i16 4080, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_signbit_eq:
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testl %eax, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i32_signbit_eq:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: testl %eax, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i32_signbit_eq:
; X64-BMI1-NEXT: movl %esi, %ecx
; X64-BMI1-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shll %cl, %eax
; X64-BMI1-NEXT: testl %edi, %eax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i32_signbit_eq:
; X64-BMI2-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
; X64-BMI2-NEXT: testl %edi, %eax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i32 2147483648, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
; X86-LABEL: scalar_i32_lowestbit_eq:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: setae %al
; X64-LABEL: scalar_i32_lowestbit_eq:
; X64-NEXT: btl %esi, %edi
; X64-NEXT: setae %al
  %t0 = shl i32 1, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $16776960, %eax # imm = 0xFFFF00
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testl %eax, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $16776960, %ecx # imm = 0xFFFF00
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: testl %eax, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI1-NEXT: movl %esi, %ecx
; X64-BMI1-NEXT: movl $16776960, %eax # imm = 0xFFFF00
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shll %cl, %eax
; X64-BMI1-NEXT: testl %edi, %eax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI2-NEXT: movl $16776960, %eax # imm = 0xFFFF00
; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
; X64-BMI2-NEXT: testl %edi, %eax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i32 16776960, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
; X86-LABEL: scalar_i64_signbit_eq:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: movl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT: shldl %cl, %eax, %edx
; X86-NEXT: testb $32, %cl
; X86-NEXT: cmovnel %eax, %edx
; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl $0, %edx
; X64-BMI1-LABEL: scalar_i64_signbit_eq:
; X64-BMI1-NEXT: movq %rsi, %rcx
; X64-BMI1-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT: shlq %cl, %rax
; X64-BMI1-NEXT: testq %rdi, %rax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i64_signbit_eq:
; X64-BMI2-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
; X64-BMI2-NEXT: testq %rdi, %rax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i64 9223372036854775808, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI1-NEXT: pushl %esi
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $1, %eax
; X86-BMI1-NEXT: xorl %edx, %edx
; X86-BMI1-NEXT: xorl %esi, %esi
; X86-BMI1-NEXT: shldl %cl, %eax, %esi
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testb $32, %cl
; X86-BMI1-NEXT: cmovnel %eax, %esi
; X86-BMI1-NEXT: cmovnel %edx, %eax
; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT: orl %esi, %eax
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: popl %esi
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI2-NEXT: pushl %esi
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI2-NEXT: movl $1, %eax
; X86-BMI2-NEXT: xorl %edx, %edx
; X86-BMI2-NEXT: xorl %esi, %esi
; X86-BMI2-NEXT: shldl %cl, %eax, %esi
; X86-BMI2-NEXT: shlxl %ecx, %eax, %eax
; X86-BMI2-NEXT: testb $32, %cl
; X86-BMI2-NEXT: cmovnel %eax, %esi
; X86-BMI2-NEXT: cmovnel %edx, %eax
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: orl %esi, %eax
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: popl %esi
; X86-BMI2-NEXT: retl
; X64-LABEL: scalar_i64_lowestbit_eq:
; X64-NEXT: btq %rsi, %rdi
; X64-NEXT: setae %al
  %t0 = shl i64 1, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI1-NEXT: pushl %esi
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
; X86-BMI1-NEXT: movl $65535, %edx # imm = 0xFFFF
; X86-BMI1-NEXT: shldl %cl, %eax, %edx
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: xorl %esi, %esi
; X86-BMI1-NEXT: testb $32, %cl
; X86-BMI1-NEXT: cmovnel %eax, %edx
; X86-BMI1-NEXT: cmovel %eax, %esi
; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-BMI1-NEXT: orl %edx, %esi
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: popl %esi
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI2-NEXT: pushl %esi
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI2-NEXT: movl $-65536, %eax # imm = 0xFFFF0000
; X86-BMI2-NEXT: movl $65535, %edx # imm = 0xFFFF
; X86-BMI2-NEXT: shldl %cl, %eax, %edx
; X86-BMI2-NEXT: shlxl %ecx, %eax, %eax
; X86-BMI2-NEXT: xorl %esi, %esi
; X86-BMI2-NEXT: testb $32, %cl
; X86-BMI2-NEXT: cmovnel %eax, %edx
; X86-BMI2-NEXT: cmovel %eax, %esi
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
; X86-BMI2-NEXT: orl %edx, %esi
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: popl %esi
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI1-NEXT: movq %rsi, %rcx
; X64-BMI1-NEXT: movabsq $281474976645120, %rax # imm = 0xFFFFFFFF0000
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT: shlq %cl, %rax
; X64-BMI1-NEXT: testq %rdi, %rax
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI2-NEXT: movabsq $281474976645120, %rax # imm = 0xFFFFFFFF0000
; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
; X64-BMI2-NEXT: testq %rdi, %rax
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i64 281474976645120, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

;------------------------------------------------------------------------------;
; A few trivial vector tests
;------------------------------------------------------------------------------;
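
; Note on the expected codegen below: without AVX2 there is no variable
; per-element vector shift, so the SSE2 lowering materializes (1 << y) per lane
; via the usual pslld $23 + paddd + cvttps2dq exponent trick followed by a
; pmuludq/pshufd multiply; with AVX2 the same shift is a single vpsllvd.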
define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_splat_eq:
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
; AVX2-LABEL: vec_4xi32_splat_eq:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: ret{{[l|q]}}
; X64-SSE2-LABEL: vec_4xi32_splat_eq:
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
; AVX2-LABEL: vec_4xi32_nonsplat_eq:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: ret{{[l|q]}}
; X64-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE2-NEXT: pand %xmm2, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
  %t0 = shl <4 x i32> <i32 0, i32 1, i32 16776960, i32 2147483648>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
; AVX2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: ret{{[l|q]}}
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
; AVX2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: ret{{[l|q]}}
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
; AVX2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: ret{{[l|q]}}
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm2, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %res
}

;------------------------------------------------------------------------------;
; The 'ne' variant of the pattern
;------------------------------------------------------------------------------;
define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_ne:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movb $-128, %al
; X86-NEXT: shlb %cl, %al
; X86-NEXT: testb %al, {{[0-9]+}}(%esp)
; X86-NEXT: setne %al
; X64-LABEL: scalar_i8_signbit_ne:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movb $-128, %al
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlb %cl, %al
; X64-NEXT: testb %dil, %al
; X64-NEXT: setne %al
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
  ret i1 %res
}

;------------------------------------------------------------------------------;
; What if X is a constant too?
;------------------------------------------------------------------------------;
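
; Here the non-shifted operand of the 'and' is a constant as well, so only %y
; varies; in the second variant below the whole pattern collapses into a
; single bt against the constant mask.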
define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_x_is_const_eq:
; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT: movl $-1437226411, %eax # imm = 0xAA55AA55
; X86-BMI1-NEXT: shll %cl, %eax
; X86-BMI1-NEXT: testb $1, %al
; X86-BMI1-NEXT: sete %al
; X86-BMI1-NEXT: retl
; X86-BMI2-LABEL: scalar_i32_x_is_const_eq:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT: movl $-1437226411, %ecx # imm = 0xAA55AA55
; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT: testb $1, %al
; X86-BMI2-NEXT: sete %al
; X86-BMI2-NEXT: retl
; X64-BMI1-LABEL: scalar_i32_x_is_const_eq:
; X64-BMI1-NEXT: movl %edi, %ecx
; X64-BMI1-NEXT: movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT: shll %cl, %eax
; X64-BMI1-NEXT: testb $1, %al
; X64-BMI1-NEXT: sete %al
; X64-BMI1-NEXT: retq
; X64-BMI2-LABEL: scalar_i32_x_is_const_eq:
; X64-BMI2-NEXT: movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-BMI2-NEXT: shlxl %edi, %eax, %eax
; X64-BMI2-NEXT: testb $1, %al
; X64-BMI2-NEXT: sete %al
; X64-BMI2-NEXT: retq
  %t0 = shl i32 2857740885, %y
  %t1 = and i32 %t0, 1
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
; X86-LABEL: scalar_i32_x_is_const2_eq:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $-1437226411, %ecx # imm = 0xAA55AA55
; X86-NEXT: btl %eax, %ecx
; X86-NEXT: setae %al
; X64-LABEL: scalar_i32_x_is_const2_eq:
; X64-NEXT: movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-NEXT: btl %edi, %eax
; X64-NEXT: setae %al
  %t0 = shl i32 1, %y
  %t1 = and i32 %t0, 2857740885
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

;------------------------------------------------------------------------------;
; A few negative tests
;------------------------------------------------------------------------------;
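
; These must not be transformed: the first test uses an 'slt' comparison
; rather than eq/ne against zero, and the second compares the masked value
; with 1 instead of 0.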
define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
; X86-LABEL: negative_scalar_i8_bitsinmiddle_slt:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movb $24, %al
; X86-NEXT: shlb %cl, %al
; X86-NEXT: andb {{[0-9]+}}(%esp), %al
; X86-NEXT: shrb $7, %al
; X64-LABEL: negative_scalar_i8_bitsinmiddle_slt:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movb $24, %al
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlb %cl, %al
; X64-NEXT: andb %dil, %al
; X64-NEXT: shrb $7, %al
  %t0 = shl i8 24, %y
  %t1 = and i8 %t0, %x
  %res = icmp slt i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movb $-128, %al
; X86-NEXT: shlb %cl, %al
; X86-NEXT: andb {{[0-9]+}}(%esp), %al
; X86-NEXT: cmpb $1, %al
; X64-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movb $-128, %al
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlb %cl, %al
; X64-NEXT: andb %dil, %al
; X64-NEXT: cmpb $1, %al
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 1 ; should be comparing with 0
  ret i1 %res
}