; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
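
; Each test loads an i8 bitfield (test8 sign-extends it to i16 first), shifts
; it, applies a logic op, and uses the result as a 64-bit value or GEP index.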

define i64 @test1(ptr %data) {
; X86-LABEL: test1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shll $2, %eax
; X86-NEXT:    andl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    shll $2, %eax
; X64-NEXT:    andl $60, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  ret i64 %mul
}

define ptr @test2(ptr %data) {
; X86-LABEL: test2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %ecx
; X86-NEXT:    andl $15, %ecx
; X86-NEXT:    leal (%eax,%ecx,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    andl $15, %eax
; X64-NEXT:    leaq (%rdi,%rax,4), %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  %add.ptr = getelementptr inbounds i8, ptr %data, i64 %mul
  ret ptr %add.ptr
}

; If the shift op is SHL, the logic op can only be AND.
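; With SHL + XOR the fold does not apply, so the shift and xor below stay at
; i8 width (shlb/xorb) and the zero extension is a separate movzbl.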
define i64 @test3(ptr %data) {
; X86-LABEL: test3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shlb $2, %al
; X86-NEXT:    xorb $60, %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shlb $2, %al
; X64-NEXT:    xorb $60, %al
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = xor i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  ret i64 %mul
}

define i64 @test4(ptr %data) {
; X86-LABEL: test4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    andl $-4, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shrl $2, %eax
; X64-NEXT:    andl $-4, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

define i64 @test5(ptr %data) {
; X86-LABEL: test5:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    xorl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test5:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shrl $2, %eax
; X64-NEXT:    xorq $60, %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = xor i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

define i64 @test6(ptr %data) {
; X86-LABEL: test6:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    orl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test6:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shrl $2, %eax
; X64-NEXT:    orq $60, %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = or i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

; Load is folded with sext.
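; The i8 load below is sign-extended to i16 before the shift, so it is emitted
; as movsbl rather than movzbl.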
define i64 @test8(ptr %data) {
; X86-LABEL: test8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsbl (%eax), %eax
; X86-NEXT:    movzwl %ax, %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    orl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movsbl (%rdi), %eax
; X64-NEXT:    movzwl %ax, %eax
; X64-NEXT:    shrl $2, %eax
; X64-NEXT:    orl $60, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, ptr %data, align 4
  %ext = sext i8 %bf.load to i16
  %bf.clear = lshr i16 %ext, 2
  %0 = or i16 %bf.clear, 60
  %1 = zext i16 %0 to i64
  ret i64 %1
}