; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instcombine -S | FileCheck %s

; Here we subtract two values, check that the subtraction did not overflow AND
; that the result is non-zero. This can be simplified to just a comparison
; between the base and the offset.
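
; A quick sketch of that simplification (it is NOT asserted by the CHECK lines
; in this file; the value names are simply the ones the tests below use).
; Given %adjusted = %base - %offset:
;
;   %no_underflow = icmp uge i8 %base, %offset
;   %not_null     = icmp ne i8 %adjusted, 0
;   %r            = and i1 %not_null, %no_underflow
;     ==>
;   %r            = icmp ugt i8 %base, %offset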

declare void @use8(i8)
declare void @use64(i64)
declare void @use1(i1)

declare {i8, i1} @llvm.usub.with.overflow(i8, i8)
declare void @useagg({i8, i1})

declare void @llvm.assume(i1)

; There are a number of base patterns...

define i1 @t0_noncanonical_ignoreme(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0_noncanonical_ignoreme(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp uge i8 %base, %offset
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t1_strict(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1_strict(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ugt i8 %base, %offset ; the same is valid for a strict predicate
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t2(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = xor i1 [[UNDERFLOW]], true
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %no_underflow = xor i1 %underflow, -1
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t3_commutability0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3_commutability0(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %offset, %base ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t4_commutability1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutability1(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp uge i8 %base, %offset
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}
define i1 @t5_commutability2(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5_commutability2(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %no_underflow = icmp ule i8 %offset, %base ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}

define i1 @t6_commutability(i8 %base, i8 %offset) {
; CHECK-LABEL: @t6_commutability(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = xor i1 [[UNDERFLOW]], true
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %no_underflow = xor i1 %underflow, -1
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i8 %adjusted, 0
  %r = and i1 %no_underflow, %not_null ; swapped
  ret i1 %r
}

; What if we were checking the opposite question: that we either got null,
; or that overflow happened?
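
; Sketch of the dual simplification (again, not asserted by any CHECK line
; here; the names are just the ones the tests use). Given
; %adjusted = %base - %offset:
;
;   %underflow = icmp ult i8 %base, %offset
;   %null      = icmp eq i8 %adjusted, 0
;   %r         = or i1 %null, %underflow
;     ==>
;   %r         = icmp ule i8 %base, %offset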

define i1 @t7(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NULL]], [[UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ult i8 %base, %offset
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}
define i1 @t7_nonstrict(i8 %base, i8 %offset) {
; CHECK-LABEL: @t7_nonstrict(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NULL]], [[UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ule i8 %base, %offset ; the same is valid for a non-strict predicate
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}

define i1 @t8(i8 %base, i8 %offset) {
; CHECK-LABEL: @t8(
; CHECK-NEXT:    [[AGG:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[BASE:%.*]], i8 [[OFFSET:%.*]])
; CHECK-NEXT:    call void @useagg({ i8, i1 } [[AGG]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = extractvalue { i8, i1 } [[AGG]], 0
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = extractvalue { i8, i1 } [[AGG]], 1
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NULL]], [[UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %agg = call {i8, i1} @llvm.usub.with.overflow(i8 %base, i8 %offset)
  call void @useagg({i8, i1} %agg)
  %adjusted = extractvalue {i8, i1} %agg, 0
  call void @use8(i8 %adjusted)
  %underflow = extractvalue {i8, i1} %agg, 1
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  %r = or i1 %null, %underflow
  ret i1 %r
}

; And these patterns also have commutative variants

define i1 @t9_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t9_commutative(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT:    [[NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NULL]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NULL]], [[UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %underflow = icmp ult i8 %base, %adjusted ; swapped
  call void @use1(i1 %underflow)
  %null = icmp eq i8 %adjusted, 0
  call void @use1(i1 %null)
  %r = or i1 %null, %underflow
  ret i1 %r
}

;-------------------------------------------------------------------------------
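
; The tests below take the offset as a ptrtoint of a nonnull pointer, so
; %offset can presumably be treated as known non-zero. Under that assumption
; (a hedged reading of these tests, not something the CHECK lines assert),
; the no-underflow check can be written against %adjusted itself, e.g.:
;
;   %no_underflow = icmp ult i64 %adjusted, %base ; ok only when %offset != 0
;
; (@t14_bad at the end uses the same form with a plain i64 %offset, which is
; presumably why it is tagged "bad".)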

define i1 @t10(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t10(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ult i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t11_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t11_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ugt i64 %base, %adjusted ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t12(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t12(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp uge i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp eq i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t13(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t13(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ule i64 %base, %adjusted ; swapped
  call void @use1(i1 %no_underflow)
  %not_null = icmp eq i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t14_bad(i64 %base, i64 %offset) {
; CHECK-LABEL: @t14_bad(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[ADJUSTED]])
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i64 %base, %offset
  call void @use64(i64 %adjusted)
  %no_underflow = icmp ult i64 %adjusted, %base
  call void @use1(i1 %no_underflow)
  %not_null = icmp ne i64 %adjusted, 0
  call void @use1(i1 %not_null)
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}