1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V0,X86-V0
3 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V1,X86-V1
4 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI2,X86-BMI2,V2,X86-V2
5 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,BMI2,X86-BMI2,AVX2,X86-AVX2
6 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V0,X64-V0
7 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V1,X64-V1
8 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI2,X64-BMI2,V2,X64-V2
9 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,BMI2,X64-BMI2,AVX2,X64-AVX2
11 ; We are looking for the following pattern here:
12 ; (X & (C l>> Y)) ==/!= 0
13 ; It may be optimal to hoist the constant:
14 ; ((X << Y) & C) ==/!= 0
16 ;------------------------------------------------------------------------------;
18 ;------------------------------------------------------------------------------;
; i8, sign-bit mask (0x80), eq-0 predicate: expected codegen hoists the
; constant — %x is shifted left and tested against $-128 directly, instead of
; shifting the constant right at run time.
; NOTE(review): the %t0 (lshr) / %t1 (and) / ret IR lines are elided in this view.
22 define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
23 ; X86-LABEL: scalar_i8_signbit_eq:
25 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
26 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
27 ; X86-NEXT: shlb %cl, %al
28 ; X86-NEXT: testb $-128, %al
32 ; X64-LABEL: scalar_i8_signbit_eq:
34 ; X64-NEXT: movl %esi, %ecx
35 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
36 ; X64-NEXT: shlb %cl, %dil
37 ; X64-NEXT: testb $-128, %dil
42 %res = icmp eq i8 %t1, 0
; i8, lowest-bit mask (0x01), eq-0 predicate: the constant is hoisted, so the
; checked codegen is shl of %x followed by testb $1.
; NOTE(review): the %t0 (lshr) / %t1 (and) / ret IR lines are elided in this view.
46 define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
47 ; X86-LABEL: scalar_i8_lowestbit_eq:
49 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
50 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
51 ; X86-NEXT: shlb %cl, %al
52 ; X86-NEXT: testb $1, %al
56 ; X64-LABEL: scalar_i8_lowestbit_eq:
58 ; X64-NEXT: movl %esi, %ecx
59 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
60 ; X64-NEXT: shlb %cl, %dil
61 ; X64-NEXT: testb $1, %dil
66 %res = icmp eq i8 %t1, 0
; i8, middle-bits mask (0x18 = 24), eq-0 predicate: constant hoisted;
; codegen is shl of %x then testb $24.
; NOTE(review): the %t0 (lshr) / %t1 (and) / ret IR lines are elided in this view.
70 define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
71 ; X86-LABEL: scalar_i8_bitsinmiddle_eq:
73 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
74 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
75 ; X86-NEXT: shlb %cl, %al
76 ; X86-NEXT: testb $24, %al
80 ; X64-LABEL: scalar_i8_bitsinmiddle_eq:
82 ; X64-NEXT: movl %esi, %ecx
83 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
84 ; X64-NEXT: shlb %cl, %dil
85 ; X64-NEXT: testb $24, %dil
90 %res = icmp eq i8 %t1, 0
; i16, sign-bit mask (0x8000 = 32768): the IR computes (32768 lshr %y) & %x,
; but codegen shifts %x left (shll / BMI2 shlxl) and tests $32768.
; NOTE(review): the trailing ret/} lines are elided in this view.
96 define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
97 ; X86-BMI1-LABEL: scalar_i16_signbit_eq:
99 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
100 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
101 ; X86-BMI1-NEXT: shll %cl, %eax
102 ; X86-BMI1-NEXT: testl $32768, %eax # imm = 0x8000
103 ; X86-BMI1-NEXT: sete %al
104 ; X86-BMI1-NEXT: retl
106 ; X86-BMI2-LABEL: scalar_i16_signbit_eq:
108 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
109 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
110 ; X86-BMI2-NEXT: testl $32768, %eax # imm = 0x8000
111 ; X86-BMI2-NEXT: sete %al
112 ; X86-BMI2-NEXT: retl
114 ; X64-BMI1-LABEL: scalar_i16_signbit_eq:
116 ; X64-BMI1-NEXT: movl %esi, %ecx
117 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
118 ; X64-BMI1-NEXT: shll %cl, %edi
119 ; X64-BMI1-NEXT: testl $32768, %edi # imm = 0x8000
120 ; X64-BMI1-NEXT: sete %al
121 ; X64-BMI1-NEXT: retq
123 ; X64-BMI2-LABEL: scalar_i16_signbit_eq:
125 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
126 ; X64-BMI2-NEXT: testl $32768, %eax # imm = 0x8000
127 ; X64-BMI2-NEXT: sete %al
128 ; X64-BMI2-NEXT: retq
129 %t0 = lshr i16 32768, %y
130 %t1 = and i16 %t0, %x
131 %res = icmp eq i16 %t1, 0
; i16, lowest-bit mask: codegen is a 32-bit shift of %x followed by testb $1
; on the low byte.
; NOTE(review): the %t0 (lshr) line and trailing ret/} are elided in this view.
135 define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
136 ; X86-BMI1-LABEL: scalar_i16_lowestbit_eq:
138 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
139 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
140 ; X86-BMI1-NEXT: shll %cl, %eax
141 ; X86-BMI1-NEXT: testb $1, %al
142 ; X86-BMI1-NEXT: sete %al
143 ; X86-BMI1-NEXT: retl
145 ; X86-BMI2-LABEL: scalar_i16_lowestbit_eq:
147 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
148 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
149 ; X86-BMI2-NEXT: testb $1, %al
150 ; X86-BMI2-NEXT: sete %al
151 ; X86-BMI2-NEXT: retl
153 ; X64-BMI1-LABEL: scalar_i16_lowestbit_eq:
155 ; X64-BMI1-NEXT: movl %esi, %ecx
156 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
157 ; X64-BMI1-NEXT: shll %cl, %edi
158 ; X64-BMI1-NEXT: testb $1, %dil
159 ; X64-BMI1-NEXT: sete %al
160 ; X64-BMI1-NEXT: retq
162 ; X64-BMI2-LABEL: scalar_i16_lowestbit_eq:
164 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
165 ; X64-BMI2-NEXT: testb $1, %al
166 ; X64-BMI2-NEXT: sete %al
167 ; X64-BMI2-NEXT: retq
169 %t1 = and i16 %t0, %x
170 %res = icmp eq i16 %t1, 0
; i16, middle-bits mask (0xFF0 = 4080): constant hoisted; codegen shifts %x
; left and tests $4080.
; NOTE(review): the trailing ret/} lines are elided in this view.
174 define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
175 ; X86-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
177 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
178 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
179 ; X86-BMI1-NEXT: shll %cl, %eax
180 ; X86-BMI1-NEXT: testl $4080, %eax # imm = 0xFF0
181 ; X86-BMI1-NEXT: sete %al
182 ; X86-BMI1-NEXT: retl
184 ; X86-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
186 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
187 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
188 ; X86-BMI2-NEXT: testl $4080, %eax # imm = 0xFF0
189 ; X86-BMI2-NEXT: sete %al
190 ; X86-BMI2-NEXT: retl
192 ; X64-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
194 ; X64-BMI1-NEXT: movl %esi, %ecx
195 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
196 ; X64-BMI1-NEXT: shll %cl, %edi
197 ; X64-BMI1-NEXT: testl $4080, %edi # imm = 0xFF0
198 ; X64-BMI1-NEXT: sete %al
199 ; X64-BMI1-NEXT: retq
201 ; X64-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
203 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
204 ; X64-BMI2-NEXT: testl $4080, %eax # imm = 0xFF0
205 ; X64-BMI2-NEXT: sete %al
206 ; X64-BMI2-NEXT: retq
207 %t0 = lshr i16 4080, %y
208 %t1 = and i16 %t0, %x
209 %res = icmp eq i16 %t1, 0
; i32, sign-bit mask (0x80000000): constant hoisted; codegen shifts %x left
; and tests $-2147483648.
; NOTE(review): the trailing ret/} lines are elided in this view.
215 define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
216 ; X86-BMI1-LABEL: scalar_i32_signbit_eq:
218 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
219 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
220 ; X86-BMI1-NEXT: shll %cl, %eax
221 ; X86-BMI1-NEXT: testl $-2147483648, %eax # imm = 0x80000000
222 ; X86-BMI1-NEXT: sete %al
223 ; X86-BMI1-NEXT: retl
225 ; X86-BMI2-LABEL: scalar_i32_signbit_eq:
227 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
228 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
229 ; X86-BMI2-NEXT: testl $-2147483648, %eax # imm = 0x80000000
230 ; X86-BMI2-NEXT: sete %al
231 ; X86-BMI2-NEXT: retl
233 ; X64-BMI1-LABEL: scalar_i32_signbit_eq:
235 ; X64-BMI1-NEXT: movl %esi, %ecx
236 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
237 ; X64-BMI1-NEXT: shll %cl, %edi
238 ; X64-BMI1-NEXT: testl $-2147483648, %edi # imm = 0x80000000
239 ; X64-BMI1-NEXT: sete %al
240 ; X64-BMI1-NEXT: retq
242 ; X64-BMI2-LABEL: scalar_i32_signbit_eq:
244 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
245 ; X64-BMI2-NEXT: testl $-2147483648, %eax # imm = 0x80000000
246 ; X64-BMI2-NEXT: sete %al
247 ; X64-BMI2-NEXT: retq
248 %t0 = lshr i32 2147483648, %y
249 %t1 = and i32 %t0, %x
250 %res = icmp eq i32 %t1, 0
; i32, lowest-bit mask: codegen shifts %x left and tests $1 on the low byte.
; NOTE(review): the %t0 (lshr) line and trailing ret/} are elided in this view.
254 define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
255 ; X86-BMI1-LABEL: scalar_i32_lowestbit_eq:
257 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
258 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
259 ; X86-BMI1-NEXT: shll %cl, %eax
260 ; X86-BMI1-NEXT: testb $1, %al
261 ; X86-BMI1-NEXT: sete %al
262 ; X86-BMI1-NEXT: retl
264 ; X86-BMI2-LABEL: scalar_i32_lowestbit_eq:
266 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
267 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
268 ; X86-BMI2-NEXT: testb $1, %al
269 ; X86-BMI2-NEXT: sete %al
270 ; X86-BMI2-NEXT: retl
272 ; X64-BMI1-LABEL: scalar_i32_lowestbit_eq:
274 ; X64-BMI1-NEXT: movl %esi, %ecx
275 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
276 ; X64-BMI1-NEXT: shll %cl, %edi
277 ; X64-BMI1-NEXT: testb $1, %dil
278 ; X64-BMI1-NEXT: sete %al
279 ; X64-BMI1-NEXT: retq
281 ; X64-BMI2-LABEL: scalar_i32_lowestbit_eq:
283 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
284 ; X64-BMI2-NEXT: testb $1, %al
285 ; X64-BMI2-NEXT: sete %al
286 ; X64-BMI2-NEXT: retq
288 %t1 = and i32 %t0, %x
289 %res = icmp eq i32 %t1, 0
; i32, middle-bits mask (0xFFFF00 = 16776960): constant hoisted; codegen
; shifts %x left and tests $16776960.
; NOTE(review): the trailing ret/} lines are elided in this view.
293 define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
294 ; X86-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
296 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
297 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
298 ; X86-BMI1-NEXT: shll %cl, %eax
299 ; X86-BMI1-NEXT: testl $16776960, %eax # imm = 0xFFFF00
300 ; X86-BMI1-NEXT: sete %al
301 ; X86-BMI1-NEXT: retl
303 ; X86-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
305 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
306 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
307 ; X86-BMI2-NEXT: testl $16776960, %eax # imm = 0xFFFF00
308 ; X86-BMI2-NEXT: sete %al
309 ; X86-BMI2-NEXT: retl
311 ; X64-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
313 ; X64-BMI1-NEXT: movl %esi, %ecx
314 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
315 ; X64-BMI1-NEXT: shll %cl, %edi
316 ; X64-BMI1-NEXT: testl $16776960, %edi # imm = 0xFFFF00
317 ; X64-BMI1-NEXT: sete %al
318 ; X64-BMI1-NEXT: retq
320 ; X64-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
322 ; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
323 ; X64-BMI2-NEXT: testl $16776960, %eax # imm = 0xFFFF00
324 ; X64-BMI2-NEXT: sete %al
325 ; X64-BMI2-NEXT: retq
326 %t0 = lshr i32 16776960, %y
327 %t1 = and i32 %t0, %x
328 %res = icmp eq i32 %t1, 0
; i64, sign-bit mask (1<<63): on 32-bit x86 the 64-bit shift is expanded via
; shll/shldl + cmov on bit 5 of the count, then the high half is tested; on
; x86-64 codegen uses shlq then shrq $63 to isolate the sign bit.
; NOTE(review): the trailing ret/} lines are elided in this view.
334 define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
335 ; X86-BMI1-LABEL: scalar_i64_signbit_eq:
337 ; X86-BMI1-NEXT: pushl %esi
338 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
339 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
340 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edx
341 ; X86-BMI1-NEXT: movl %eax, %esi
342 ; X86-BMI1-NEXT: shll %cl, %esi
343 ; X86-BMI1-NEXT: shldl %cl, %eax, %edx
344 ; X86-BMI1-NEXT: testb $32, %cl
345 ; X86-BMI1-NEXT: cmovnel %esi, %edx
346 ; X86-BMI1-NEXT: testl $-2147483648, %edx # imm = 0x80000000
347 ; X86-BMI1-NEXT: sete %al
348 ; X86-BMI1-NEXT: popl %esi
349 ; X86-BMI1-NEXT: retl
351 ; X86-BMI2-LABEL: scalar_i64_signbit_eq:
353 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
354 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
355 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
356 ; X86-BMI2-NEXT: shldl %cl, %eax, %edx
357 ; X86-BMI2-NEXT: shlxl %ecx, %eax, %eax
358 ; X86-BMI2-NEXT: testb $32, %cl
359 ; X86-BMI2-NEXT: cmovel %edx, %eax
360 ; X86-BMI2-NEXT: testl $-2147483648, %eax # imm = 0x80000000
361 ; X86-BMI2-NEXT: sete %al
362 ; X86-BMI2-NEXT: retl
364 ; X64-BMI1-LABEL: scalar_i64_signbit_eq:
366 ; X64-BMI1-NEXT: movq %rsi, %rcx
367 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
368 ; X64-BMI1-NEXT: shlq %cl, %rdi
369 ; X64-BMI1-NEXT: shrq $63, %rdi
370 ; X64-BMI1-NEXT: sete %al
371 ; X64-BMI1-NEXT: retq
373 ; X64-BMI2-LABEL: scalar_i64_signbit_eq:
375 ; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
376 ; X64-BMI2-NEXT: shrq $63, %rax
377 ; X64-BMI2-NEXT: sete %al
378 ; X64-BMI2-NEXT: retq
379 %t0 = lshr i64 9223372036854775808, %y
380 %t1 = and i64 %t0, %x
381 %res = icmp eq i64 %t1, 0
; i64, lowest-bit mask: on 32-bit x86 only the low half matters, selected by
; bit 5 of the count (zero when %y >= 32); on x86-64 a plain shlq + testb $1.
; NOTE(review): the %t0 (lshr) line and trailing ret/} are elided in this view.
385 define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
386 ; X86-BMI1-LABEL: scalar_i64_lowestbit_eq:
388 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
389 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
390 ; X86-BMI1-NEXT: shll %cl, %eax
391 ; X86-BMI1-NEXT: xorl %edx, %edx
392 ; X86-BMI1-NEXT: testb $32, %cl
393 ; X86-BMI1-NEXT: cmovel %eax, %edx
394 ; X86-BMI1-NEXT: testb $1, %dl
395 ; X86-BMI1-NEXT: sete %al
396 ; X86-BMI1-NEXT: retl
398 ; X86-BMI2-LABEL: scalar_i64_lowestbit_eq:
400 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
401 ; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
402 ; X86-BMI2-NEXT: xorl %edx, %edx
403 ; X86-BMI2-NEXT: testb $32, %al
404 ; X86-BMI2-NEXT: cmovel %ecx, %edx
405 ; X86-BMI2-NEXT: testb $1, %dl
406 ; X86-BMI2-NEXT: sete %al
407 ; X86-BMI2-NEXT: retl
409 ; X64-BMI1-LABEL: scalar_i64_lowestbit_eq:
411 ; X64-BMI1-NEXT: movq %rsi, %rcx
412 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
413 ; X64-BMI1-NEXT: shlq %cl, %rdi
414 ; X64-BMI1-NEXT: testb $1, %dil
415 ; X64-BMI1-NEXT: sete %al
416 ; X64-BMI1-NEXT: retq
418 ; X64-BMI2-LABEL: scalar_i64_lowestbit_eq:
420 ; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
421 ; X64-BMI2-NEXT: testb $1, %al
422 ; X64-BMI2-NEXT: sete %al
423 ; X64-BMI2-NEXT: retq
425 %t1 = and i64 %t0, %x
426 %res = icmp eq i64 %t1, 0
; i64, middle-bits mask (0xFFFFFFFF0000): x86-64 shifts %x left and tests the
; movabsq'd mask; 32-bit x86 expands the shift with shll/shldl + cmov and
; tests both halves (low 16 bits of the high word, high 16 of the low word).
; NOTE(review): the trailing ret/} lines are elided in this view.
430 define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
431 ; X86-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
433 ; X86-BMI1-NEXT: pushl %esi
434 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
435 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
436 ; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edx
437 ; X86-BMI1-NEXT: movl %eax, %esi
438 ; X86-BMI1-NEXT: shll %cl, %esi
439 ; X86-BMI1-NEXT: shldl %cl, %eax, %edx
440 ; X86-BMI1-NEXT: xorl %eax, %eax
441 ; X86-BMI1-NEXT: testb $32, %cl
442 ; X86-BMI1-NEXT: cmovnel %esi, %edx
443 ; X86-BMI1-NEXT: movzwl %dx, %ecx
444 ; X86-BMI1-NEXT: cmovel %esi, %eax
445 ; X86-BMI1-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
446 ; X86-BMI1-NEXT: orl %ecx, %eax
447 ; X86-BMI1-NEXT: sete %al
448 ; X86-BMI1-NEXT: popl %esi
449 ; X86-BMI1-NEXT: retl
451 ; X86-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
453 ; X86-BMI2-NEXT: pushl %esi
454 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
455 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
456 ; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
457 ; X86-BMI2-NEXT: shldl %cl, %eax, %edx
458 ; X86-BMI2-NEXT: shlxl %ecx, %eax, %eax
459 ; X86-BMI2-NEXT: xorl %esi, %esi
460 ; X86-BMI2-NEXT: testb $32, %cl
461 ; X86-BMI2-NEXT: cmovnel %eax, %edx
462 ; X86-BMI2-NEXT: movzwl %dx, %ecx
463 ; X86-BMI2-NEXT: cmovel %eax, %esi
464 ; X86-BMI2-NEXT: andl $-65536, %esi # imm = 0xFFFF0000
465 ; X86-BMI2-NEXT: orl %ecx, %esi
466 ; X86-BMI2-NEXT: sete %al
467 ; X86-BMI2-NEXT: popl %esi
468 ; X86-BMI2-NEXT: retl
470 ; X64-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
472 ; X64-BMI1-NEXT: movq %rsi, %rcx
473 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
474 ; X64-BMI1-NEXT: shlq %cl, %rdi
475 ; X64-BMI1-NEXT: movabsq $281474976645120, %rax # imm = 0xFFFFFFFF0000
476 ; X64-BMI1-NEXT: testq %rax, %rdi
477 ; X64-BMI1-NEXT: sete %al
478 ; X64-BMI1-NEXT: retq
480 ; X64-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
482 ; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
483 ; X64-BMI2-NEXT: movabsq $281474976645120, %rcx # imm = 0xFFFFFFFF0000
484 ; X64-BMI2-NEXT: testq %rcx, %rax
485 ; X64-BMI2-NEXT: sete %al
486 ; X64-BMI2-NEXT: retq
487 %t0 = lshr i64 281474976645120, %y
488 %t1 = and i64 %t0, %x
489 %res = icmp eq i64 %t1, 0
493 ;------------------------------------------------------------------------------;
494 ; A few trivial vector tests
495 ;------------------------------------------------------------------------------;
; <4 x i32>, splat-of-1 mask: the transform fires for vectors too. AVX2 shifts
; %x left (vpsllvd) and ands with broadcast [1,1,1,1]; SSE2 has no variable
; vector shift, so the left-shift is synthesized as pslld $23 / paddd /
; cvttps2dq (build 2^y as floats) followed by the pmuludq/pshufd multiply.
; NOTE(review): the trailing ret line and } are elided in this view.
497 define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
498 ; X86-SSE2-LABEL: vec_4xi32_splat_eq:
500 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
501 ; X86-SSE2-NEXT: pslld $23, %xmm1
502 ; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
503 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
504 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
505 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
506 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
507 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
508 ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
509 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
510 ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
511 ; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
512 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
513 ; X86-SSE2-NEXT: retl
515 ; AVX2-LABEL: vec_4xi32_splat_eq:
517 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
518 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
519 ; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
520 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
521 ; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
522 ; AVX2-NEXT: ret{{[l|q]}}
524 ; X64-SSE2-LABEL: vec_4xi32_splat_eq:
526 ; X64-SSE2-NEXT: pxor %xmm2, %xmm2
527 ; X64-SSE2-NEXT: pslld $23, %xmm1
528 ; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
529 ; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
530 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
531 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm0
532 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
533 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
534 ; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
535 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
536 ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
537 ; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
538 ; X64-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
539 ; X64-SSE2-NEXT: retq
540 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
541 %t1 = and <4 x i32> %t0, %x
542 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
; <4 x i32>, non-splat mask <0,1,16776960,2147483648>: here the constant is
; NOT hoisted — codegen keeps the original form and shifts the constant
; vector right (SSE2: per-element psrld of [0,1,16776960,2147483648];
; AVX2: vpsrlvd), then ands with %x and compares against zero.
; NOTE(review): the trailing ret line and } are elided in this view.
546 define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
547 ; SSE2-LABEL: vec_4xi32_nonsplat_eq:
549 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
550 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,1,16776960,2147483648]
551 ; SSE2-NEXT: movdqa %xmm3, %xmm4
552 ; SSE2-NEXT: psrld %xmm2, %xmm4
553 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
554 ; SSE2-NEXT: movdqa %xmm3, %xmm5
555 ; SSE2-NEXT: psrld %xmm2, %xmm5
556 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
557 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
558 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
559 ; SSE2-NEXT: movdqa %xmm3, %xmm4
560 ; SSE2-NEXT: psrld %xmm2, %xmm4
561 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
562 ; SSE2-NEXT: psrld %xmm1, %xmm3
563 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
564 ; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
565 ; SSE2-NEXT: andps %xmm5, %xmm0
566 ; SSE2-NEXT: pxor %xmm1, %xmm1
567 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
568 ; SSE2-NEXT: ret{{[l|q]}}
570 ; AVX2-LABEL: vec_4xi32_nonsplat_eq:
572 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
573 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
574 ; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
575 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
576 ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
577 ; AVX2-NEXT: ret{{[l|q]}}
578 %t0 = lshr <4 x i32> <i32 0, i32 1, i32 16776960, i32 2147483648>, %y
579 %t1 = and <4 x i32> %t0, %x
580 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
; <4 x i32>, mask <1,1,undef,1> (undef in the shifted constant): treated as a
; splat of 1, so codegen matches the splat case — %x is shifted left and
; anded with [1,1,1,1].
; NOTE(review): the trailing ret line and } are elided in this view.
584 define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
585 ; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
587 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
588 ; X86-SSE2-NEXT: pslld $23, %xmm1
589 ; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
590 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
591 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
592 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
593 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
594 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
595 ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
596 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
597 ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
598 ; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
599 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
600 ; X86-SSE2-NEXT: retl
602 ; AVX2-LABEL: vec_4xi32_nonsplat_undef0_eq:
604 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
605 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
606 ; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
607 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
608 ; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
609 ; AVX2-NEXT: ret{{[l|q]}}
611 ; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
613 ; X64-SSE2-NEXT: pxor %xmm2, %xmm2
614 ; X64-SSE2-NEXT: pslld $23, %xmm1
615 ; X64-SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
616 ; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
617 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
618 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm0
619 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
620 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
621 ; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
622 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
623 ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
624 ; X64-SSE2-NEXT: pand {{.*}}(%rip), %xmm0
625 ; X64-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
626 ; X64-SSE2-NEXT: retq
627 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
628 %t1 = and <4 x i32> %t0, %x
629 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
; <4 x i32>, splat-of-1 mask but undef in the icmp RHS: the CHECK lines show
; the constant is NOT hoisted here — the [1,1,1,1] vector itself is shifted
; right (psrld / vpsrlvd) and then anded with %x.
; NOTE(review): the trailing ret line and } are elided in this view.
632 define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
633 ; SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
635 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
636 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
637 ; SSE2-NEXT: movdqa %xmm3, %xmm4
638 ; SSE2-NEXT: psrld %xmm2, %xmm4
639 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
640 ; SSE2-NEXT: movdqa %xmm3, %xmm5
641 ; SSE2-NEXT: psrld %xmm2, %xmm5
642 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
643 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
644 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
645 ; SSE2-NEXT: movdqa %xmm3, %xmm4
646 ; SSE2-NEXT: psrld %xmm2, %xmm4
647 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
648 ; SSE2-NEXT: psrld %xmm1, %xmm3
649 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
650 ; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
651 ; SSE2-NEXT: andps %xmm5, %xmm0
652 ; SSE2-NEXT: pxor %xmm1, %xmm1
653 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
654 ; SSE2-NEXT: ret{{[l|q]}}
656 ; AVX2-LABEL: vec_4xi32_nonsplat_undef1_eq:
658 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
659 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
660 ; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
661 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
662 ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
663 ; AVX2-NEXT: ret{{[l|q]}}
664 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
665 %t1 = and <4 x i32> %t0, %x
666 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
; <4 x i32>, undef in BOTH the shifted constant and the icmp RHS: again the
; constant vector <1,1,u,1> is shifted right and anded with %x (no hoist).
; NOTE(review): the trailing ret line and } are elided in this view.
669 define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
670 ; SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
672 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
673 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = <1,1,u,1>
674 ; SSE2-NEXT: movdqa %xmm3, %xmm4
675 ; SSE2-NEXT: psrld %xmm2, %xmm4
676 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
677 ; SSE2-NEXT: movdqa %xmm3, %xmm5
678 ; SSE2-NEXT: psrld %xmm2, %xmm5
679 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
680 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
681 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
682 ; SSE2-NEXT: movdqa %xmm3, %xmm4
683 ; SSE2-NEXT: psrld %xmm2, %xmm4
684 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
685 ; SSE2-NEXT: psrld %xmm1, %xmm3
686 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
687 ; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
688 ; SSE2-NEXT: andps %xmm5, %xmm0
689 ; SSE2-NEXT: pxor %xmm1, %xmm1
690 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
691 ; SSE2-NEXT: ret{{[l|q]}}
693 ; AVX2-LABEL: vec_4xi32_nonsplat_undef2_eq:
695 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
696 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
697 ; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
698 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
699 ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
700 ; AVX2-NEXT: ret{{[l|q]}}
701 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
702 %t1 = and <4 x i32> %t0, %x
703 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
707 ;------------------------------------------------------------------------------;
709 ;------------------------------------------------------------------------------;
; Same sign-bit pattern but with the 'ne' predicate (the comment on the %res
; line notes 'ne' is equally fine): codegen shifts %x left and extracts the
; sign bit directly with shrb $7 — the bit itself IS the boolean result.
; NOTE(review): the %t1 (and) / ret IR lines are elided in this view.
711 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
712 ; X86-LABEL: scalar_i8_signbit_ne:
714 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
715 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
716 ; X86-NEXT: shlb %cl, %al
717 ; X86-NEXT: shrb $7, %al
720 ; X64-LABEL: scalar_i8_signbit_ne:
722 ; X64-NEXT: movl %esi, %ecx
723 ; X64-NEXT: movl %edi, %eax
724 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
725 ; X64-NEXT: shlb %cl, %al
726 ; X64-NEXT: shrb $7, %al
727 ; X64-NEXT: # kill: def $al killed $al killed $eax
729 %t0 = lshr i8 128, %y
731 %res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
735 ;------------------------------------------------------------------------------;
736 ; What if X is a constant too?
737 ;------------------------------------------------------------------------------;
; X is itself a constant (0xAA55AA55) and only %y varies: the whole pattern
; collapses to a single bit-test — btl %y, $0xAA55AA55 with setae (CF clear
; means the selected bit is 0, i.e. the 'eq 0' result).
; NOTE(review): the %t1 (and) / ret IR lines are elided in this view.
739 define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
740 ; X86-LABEL: scalar_i32_x_is_const_eq:
742 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
743 ; X86-NEXT: movl $-1437226411, %ecx # imm = 0xAA55AA55
744 ; X86-NEXT: btl %eax, %ecx
745 ; X86-NEXT: setae %al
748 ; X64-LABEL: scalar_i32_x_is_const_eq:
750 ; X64-NEXT: movl $-1437226411, %eax # imm = 0xAA55AA55
751 ; X64-NEXT: btl %edi, %eax
752 ; X64-NEXT: setae %al
754 %t0 = lshr i32 2857740885, %y
756 %res = icmp eq i32 %t1, 0
; The complementary constant case: the shifted value is the constant and the
; mask 0xAA55AA55 is applied afterwards. Codegen materializes $1, shifts it
; right by %y (shrl / BMI2 shrxl), then tests against $-1437226411.
; NOTE(review): the %t0 (lshr) line and ret/} are elided in this view —
; presumably %t0 = lshr i32 1, %y, matching the movl $1 / shr sequence; verify
; against the full file.
759 define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
760 ; X86-BMI1-LABEL: scalar_i32_x_is_const2_eq:
762 ; X86-BMI1-NEXT: movb {{[0-9]+}}(%esp), %cl
763 ; X86-BMI1-NEXT: movl $1, %eax
764 ; X86-BMI1-NEXT: shrl %cl, %eax
765 ; X86-BMI1-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
766 ; X86-BMI1-NEXT: sete %al
767 ; X86-BMI1-NEXT: retl
769 ; X86-BMI2-LABEL: scalar_i32_x_is_const2_eq:
771 ; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
772 ; X86-BMI2-NEXT: movl $1, %ecx
773 ; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
774 ; X86-BMI2-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
775 ; X86-BMI2-NEXT: sete %al
776 ; X86-BMI2-NEXT: retl
778 ; X64-BMI1-LABEL: scalar_i32_x_is_const2_eq:
780 ; X64-BMI1-NEXT: movl %edi, %ecx
781 ; X64-BMI1-NEXT: movl $1, %eax
782 ; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
783 ; X64-BMI1-NEXT: shrl %cl, %eax
784 ; X64-BMI1-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
785 ; X64-BMI1-NEXT: sete %al
786 ; X64-BMI1-NEXT: retq
788 ; X64-BMI2-LABEL: scalar_i32_x_is_const2_eq:
790 ; X64-BMI2-NEXT: movl $1, %eax
791 ; X64-BMI2-NEXT: shrxl %edi, %eax, %eax
792 ; X64-BMI2-NEXT: testl $-1437226411, %eax # imm = 0xAA55AA55
793 ; X64-BMI2-NEXT: sete %al
794 ; X64-BMI2-NEXT: retq
796 %t1 = and i32 %t0, 2857740885
797 %res = icmp eq i32 %t1, 0
801 ;------------------------------------------------------------------------------;
802 ; A few negative tests
803 ;------------------------------------------------------------------------------;
; Negative test: the predicate is 'slt 0', not eq/ne 0, so the hoist must NOT
; fire — codegen keeps the original shape: shift the constant $24 right,
; and with %x, then extract the sign bit with shrb $7.
; NOTE(review): the %t0/%t1 IR lines and ret/} are elided in this view.
805 define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
806 ; X86-LABEL: negative_scalar_i8_bitsinmiddle_slt:
808 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
809 ; X86-NEXT: movb $24, %al
810 ; X86-NEXT: shrb %cl, %al
811 ; X86-NEXT: andb {{[0-9]+}}(%esp), %al
812 ; X86-NEXT: shrb $7, %al
815 ; X64-LABEL: negative_scalar_i8_bitsinmiddle_slt:
817 ; X64-NEXT: movl %esi, %ecx
818 ; X64-NEXT: movb $24, %al
819 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
820 ; X64-NEXT: shrb %cl, %al
821 ; X64-NEXT: andb %dil, %al
822 ; X64-NEXT: shrb $7, %al
826 %res = icmp slt i8 %t1, 0
830 define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
831 ; X86-LABEL: scalar_i8_signbit_eq_with_nonzero:
833 ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
834 ; X86-NEXT: movb $-128, %al
835 ; X86-NEXT: shrb %cl, %al
836 ; X86-NEXT: andb {{[0-9]+}}(%esp), %al
837 ; X86-NEXT: cmpb $1, %al
841 ; X64-LABEL: scalar_i8_signbit_eq_with_nonzero:
843 ; X64-NEXT: movl %esi, %ecx
844 ; X64-NEXT: movb $-128, %al
845 ; X64-NEXT: # kill: def $cl killed $cl killed $ecx
846 ; X64-NEXT: shrb %cl, %al
847 ; X64-NEXT: andb %dil, %al
848 ; X64-NEXT: cmpb $1, %al
851 %t0 = lshr i8 128, %y
853 %res = icmp eq i8 %t1, 1 ; should be comparing with 0