; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86,NOBMI-X86
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi < %s | FileCheck %s --check-prefixes=X86,BMI-X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64,NOBMI-X64
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi < %s | FileCheck %s --check-prefixes=X64,BMI-X64

; Fold
;   ptr - (ptr & (alignment-1))
; to
;   ptr & (0 - alignment)
;
; This needs to be a backend-level fold because only by this point are
; pointers just plain registers; in middle-end IR this can only be done via
; the @llvm.ptrmask() intrinsic, which is not yet sufficiently widespread.
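;
; A worked example (illustrative values, not from the test itself): with
; ptr = 0x17 and alignment = 8, the unfolded form computes mask = 7,
; bias = 0x17 & 7 = 7, and r = 0x17 - 7 = 0x10; the folded form computes
; 0x17 & -8 = 0x10 directly, using one fewer operation.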
;
; https://bugs.llvm.org/show_bug.cgi?id=44448

; The basic positive tests

define i32 @t0_32(i32 %ptr, i32 %alignment) nounwind {
; X86-LABEL: t0_32:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: t0_32:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    negl %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %ptr, %mask
  %r = sub i32 %ptr, %bias
  ret i32 %r
}

define i64 @t1_64(i64 %ptr, i64 %alignment) nounwind {
; X86-LABEL: t1_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: t1_64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    negq %rax
; X64-NEXT:    andq %rdi, %rax
; X64-NEXT:    retq
  %mask = add i64 %alignment, -1
  %bias = and i64 %ptr, %mask
  %r = sub i64 %ptr, %bias
  ret i64 %r
}
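
; (The i686 CHECK lines above show the folded form for i64: the 64-bit
; 0 - alignment is materialized as a subl/sbbl pair across %edx:%eax,
; followed by two andl instructions.)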

define i32 @t2_commutative(i32 %ptr, i32 %alignment) nounwind {
; X86-LABEL: t2_commutative:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: t2_commutative:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    negl %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %mask, %ptr ; swapped
  %r = sub i32 %ptr, %bias
  ret i32 %r
}
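
; ('and' is commutative, so the swapped operand order in @t2_commutative must
; fold to exactly the same neg+and sequence as @t0_32.)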

; Extra use tests

define i32 @t3_extrause0(i32 %ptr, i32 %alignment, ptr %mask_storage) nounwind {
; X86-LABEL: t3_extrause0:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    leal -1(%eax), %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    negl %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: t3_extrause0:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    leal -1(%rax), %ecx
; X64-NEXT:    movl %ecx, (%rdx)
; X64-NEXT:    negl %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  store i32 %mask, ptr %mask_storage
  %bias = and i32 %ptr, %mask
  %r = sub i32 %ptr, %bias
  ret i32 %r
}
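
; (An extra use of %mask does not block the fold: the mask is still computed
; and stored, but the align-down itself is done with neg+and, as checked
; above.)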

define i32 @n4_extrause1(i32 %ptr, i32 %alignment, ptr %bias_storage) nounwind {
; X86-LABEL: n4_extrause1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    decl %edx
; X86-NEXT:    andl %eax, %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    subl %edx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n4_extrause1:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    decl %esi
; X64-NEXT:    andl %edi, %esi
; X64-NEXT:    movl %esi, (%rdx)
; X64-NEXT:    subl %esi, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %ptr, %mask ; has extra uses, can't fold
  store i32 %bias, ptr %bias_storage
  %r = sub i32 %ptr, %bias
  ret i32 %r
}
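
; (Here the extra use is of %bias itself: since ptr & (alignment-1) must be
; computed anyway for the store, rewriting the sub into neg+and would not
; save anything, so the fold is not performed and a plain subl remains.)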

define i32 @n5_extrause2(i32 %ptr, i32 %alignment, ptr %mask_storage, ptr %bias_storage) nounwind {
; X86-LABEL: n5_extrause2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    decl %esi
; X86-NEXT:    movl %esi, (%edx)
; X86-NEXT:    andl %eax, %esi
; X86-NEXT:    movl %esi, (%ecx)
; X86-NEXT:    subl %esi, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
;
; X64-LABEL: n5_extrause2:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    decl %esi
; X64-NEXT:    movl %esi, (%rdx)
; X64-NEXT:    andl %edi, %esi
; X64-NEXT:    movl %esi, (%rcx)
; X64-NEXT:    subl %esi, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  store i32 %mask, ptr %mask_storage
  %bias = and i32 %ptr, %mask ; has extra uses, can't fold
  store i32 %bias, ptr %bias_storage
  %r = sub i32 %ptr, %bias
  ret i32 %r
}

; Negative tests

define i32 @n6_different_ptrs(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind {
; X86-LABEL: n6_different_ptrs:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    decl %ecx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n6_different_ptrs:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    decl %edx
; X64-NEXT:    andl %esi, %edx
; X64-NEXT:    subl %edx, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %ptr1, %mask ; not %ptr0
  %r = sub i32 %ptr0, %bias ; not %ptr1
  ret i32 %r
}
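
; (The fold requires the same pointer in both the 'and' and the 'sub';
; ptr0 - (ptr1 & (alignment-1)) is not an align-down of either pointer,
; so the generic dec+and+sub sequence is kept.)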

define i32 @n7_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind {
; X86-LABEL: n7_different_ptrs_commutative:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    decl %ecx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n7_different_ptrs_commutative:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    decl %edx
; X64-NEXT:    andl %esi, %edx
; X64-NEXT:    subl %edx, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %mask, %ptr1 ; swapped, not %ptr0
  %r = sub i32 %ptr0, %bias ; not %ptr1
  ret i32 %r
}

define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind {
; NOBMI-X86-LABEL: n8_not_lowbit_mask:
; NOBMI-X86:       # %bb.0:
; NOBMI-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT:    incl %eax
; NOBMI-X86-NEXT:    notl %eax
; NOBMI-X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT:    retl
;
; BMI-X86-LABEL: n8_not_lowbit_mask:
; BMI-X86:       # %bb.0:
; BMI-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT:    incl %eax
; BMI-X86-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT:    retl
;
; NOBMI-X64-LABEL: n8_not_lowbit_mask:
; NOBMI-X64:       # %bb.0:
; NOBMI-X64-NEXT:    # kill: def $esi killed $esi def $rsi
; NOBMI-X64-NEXT:    leal 1(%rsi), %eax
; NOBMI-X64-NEXT:    notl %eax
; NOBMI-X64-NEXT:    andl %edi, %eax
; NOBMI-X64-NEXT:    retq
;
; BMI-X64-LABEL: n8_not_lowbit_mask:
; BMI-X64:       # %bb.0:
; BMI-X64-NEXT:    incl %esi
; BMI-X64-NEXT:    andnl %edi, %esi, %eax
; BMI-X64-NEXT:    retq
  %mask = add i32 %alignment, 1 ; not -1
  %bias = and i32 %ptr, %mask
  %r = sub i32 %ptr, %bias
  ret i32 %r
}
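
; (alignment+1 is not a low-bit mask, so the align-down fold to
; ptr & (0 - alignment) does not apply; instead the generic
; ptr - (ptr & m) == ptr & ~m rewrite kicks in, selected as notl+andl,
; or as a single andnl when BMI is available.)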

define i32 @n9_sub_is_not_commutative(i32 %ptr, i32 %alignment) nounwind {
; X86-LABEL: n9_sub_is_not_commutative:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    decl %eax
; X86-NEXT:    andl %ecx, %eax
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n9_sub_is_not_commutative:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    leal -1(%rsi), %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    subl %edi, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %bias = and i32 %ptr, %mask
  %r = sub i32 %bias, %ptr ; wrong order
  ret i32 %r
}
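
; (sub is not commutative: bias - ptr is the negation of the aligned-down
; value, not the aligned-down pointer itself, so this must not fold.)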