; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=X64

; Fold and(sextinreg(v0,i5),sextinreg(v1,i5)) -> sextinreg(and(v0,v1),i5)
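; sextinreg(x,i5) on i32 lowers to shl $27 + sar $27. With matching widths the
; fold is value-preserving, e.g. for v0 = 0b10101, v1 = 0b01011:
;   and(sextinreg(v0,i5), sextinreg(v1,i5)) = 0xFFFFFFF5 & 0x0000000B = 1
;   sextinreg(and(v0,v1), i5)               = sextinreg(0b00001, i5)  = 1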
define i32 @sextinreg_i32(ptr %p0, ptr %p1) {
; X86-LABEL: sextinreg_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl (%ecx), %ecx
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    andl %ecx, %eax
; X86-NEXT:    shll $27, %eax
; X86-NEXT:    sarl $27, %eax
; X86-NEXT:    retl
;
; X64-LABEL: sextinreg_i32:
; X64:       # %bb.0:
; X64-NEXT:    movzbl (%rdi), %ecx
; X64-NEXT:    movzbl (%rsi), %eax
; X64-NEXT:    andl %ecx, %eax
; X64-NEXT:    shll $27, %eax
; X64-NEXT:    sarl $27, %eax
; X64-NEXT:    retq
  %v0 = load i8, ptr %p0, align 1
  %v1 = load i8, ptr %p1, align 1
  %x0 = zext i8 %v0 to i32
  %x1 = zext i8 %v1 to i32
  %l0 = shl i32 %x0, 27
  %l1 = shl i32 %x1, 27
  %a0 = ashr exact i32 %l0, 27
  %a1 = ashr exact i32 %l1, 27
  %and = and i32 %a0, %a1
  ret i32 %and
}

; MISMATCH and(sextinreg(v0,i2),sextinreg(v1,i5)) != sextinreg(and(v0,v1),i2)
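; Counterexample showing why mismatched widths block the fold: for v0 = 0b10,
; v1 = 0b00100:
;   and(sextinreg(v0,i2), sextinreg(v1,i5)) = 0xFFFFFFFE & 0x00000004 = 4
;   sextinreg(and(v0,v1), i2)               = sextinreg(0, i2)        = 0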
define i32 @sextinreg_i32_mismatch(ptr %p0, ptr %p1) {
; X86-LABEL: sextinreg_i32_mismatch:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl (%ecx), %ecx
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shll $30, %ecx
; X86-NEXT:    sarl $30, %ecx
; X86-NEXT:    shll $27, %eax
; X86-NEXT:    sarl $27, %eax
; X86-NEXT:    andl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: sextinreg_i32_mismatch:
; X64:       # %bb.0:
; X64-NEXT:    movzbl (%rdi), %ecx
; X64-NEXT:    movzbl (%rsi), %eax
; X64-NEXT:    shll $30, %ecx
; X64-NEXT:    sarl $30, %ecx
; X64-NEXT:    shll $27, %eax
; X64-NEXT:    sarl $27, %eax
; X64-NEXT:    andl %ecx, %eax
; X64-NEXT:    retq
  %v0 = load i8, ptr %p0, align 1
  %v1 = load i8, ptr %p1, align 1
  %x0 = zext i8 %v0 to i32
  %x1 = zext i8 %v1 to i32
  %l0 = shl i32 %x0, 30
  %l1 = shl i32 %x1, 27
  %a0 = ashr exact i32 %l0, 30
  %a1 = ashr exact i32 %l1, 27
  %and = and i32 %a0, %a1
  ret i32 %and
}