1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -instcombine -S -instcombine-infinite-loop-threshold=2 | FileCheck --check-prefixes=CHECK,DEFAULT %s
3 ; RUN: opt < %s -instcombine --enable-knowledge-retention -S -instcombine-infinite-loop-threshold=2 | FileCheck --check-prefixes=CHECK,BUNDLES %s
5 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
6 target triple = "x86_64-unknown-linux-gnu"
8 declare void @llvm.assume(i1) #1
10 ; Check that the alignment has been upgraded and that the assume has not been removed:
; The 4-byte load alignment should be upgraded to 32 via the mask-compare
; assume; with --enable-knowledge-retention (BUNDLES) the assume is instead
; canonicalized into an "align" operand bundle.
; NOTE(review): this excerpt appears line-sampled — the function's `ret`/closing
; `}` (and others below) are missing and each line carries a stray leading
; number; verify against the upstream test before regenerating CHECK lines.
13 define i32 @foo1(i32* %a) #0 {
14 ; DEFAULT-LABEL: @foo1(
15 ; DEFAULT-NEXT: [[T0:%.*]] = load i32, i32* [[A:%.*]], align 32
16 ; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
17 ; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
18 ; DEFAULT-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
19 ; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
20 ; DEFAULT-NEXT: ret i32 [[T0]]
22 ; BUNDLES-LABEL: @foo1(
23 ; BUNDLES-NEXT: [[T0:%.*]] = load i32, i32* [[A:%.*]], align 32
24 ; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
25 ; BUNDLES-NEXT: ret i32 [[T0]]
27 %t0 = load i32, i32* %a, align 4
28 %ptrint = ptrtoint i32* %a to i64
29 %maskedptr = and i64 %ptrint, 31
30 %maskcond = icmp eq i64 %maskedptr, 0
31 tail call void @llvm.assume(i1 %maskcond)
35 ; Same check as in @foo1, but make sure it works if the assume is first too.
; Both prefixes still expect the load's alignment to be raised to 32.
37 define i32 @foo2(i32* %a) #0 {
38 ; DEFAULT-LABEL: @foo2(
39 ; DEFAULT-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64
40 ; DEFAULT-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
41 ; DEFAULT-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
42 ; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
43 ; DEFAULT-NEXT: [[T0:%.*]] = load i32, i32* [[A]], align 32
44 ; DEFAULT-NEXT: ret i32 [[T0]]
46 ; BUNDLES-LABEL: @foo2(
47 ; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i64 32) ]
48 ; BUNDLES-NEXT: [[T0:%.*]] = load i32, i32* [[A]], align 32
49 ; BUNDLES-NEXT: ret i32 [[T0]]
51 %ptrint = ptrtoint i32* %a to i64
52 %maskedptr = and i64 %ptrint, 31
53 %maskcond = icmp eq i64 %maskedptr, 0
54 tail call void @llvm.assume(i1 %maskcond)
55 %t0 = load i32, i32* %a, align 4
; assume(a == 4) lets the function's result fold to the constant 4.
59 define i32 @simple(i32 %a) #1 {
60 ; CHECK-LABEL: @simple(
61 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
62 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
63 ; CHECK-NEXT: ret i32 4
65 %cmp = icmp eq i32 %a, 4
66 tail call void @llvm.assume(i1 %cmp)
; An assume of an and-chain of conditions is canonicalized into one assume
; per condition (CHECK expects three separate assumes of %a, %b, %c).
70 define i32 @can1(i1 %a, i1 %b, i1 %c) {
72 ; CHECK-NEXT: call void @llvm.assume(i1 [[A:%.*]])
73 ; CHECK-NEXT: call void @llvm.assume(i1 [[B:%.*]])
74 ; CHECK-NEXT: call void @llvm.assume(i1 [[C:%.*]])
75 ; CHECK-NEXT: ret i32 5
78 %and = and i1 %and1, %c
79 tail call void @llvm.assume(i1 %and)
; Same split as @can1 but for the select-based (logical) and-chain.
83 define i32 @can1_logical(i1 %a, i1 %b, i1 %c) {
84 ; CHECK-LABEL: @can1_logical(
85 ; CHECK-NEXT: call void @llvm.assume(i1 [[A:%.*]])
86 ; CHECK-NEXT: call void @llvm.assume(i1 [[B:%.*]])
87 ; CHECK-NEXT: call void @llvm.assume(i1 [[C:%.*]])
88 ; CHECK-NEXT: ret i32 5
90 %and1 = select i1 %a, i1 %b, i1 false
91 %and = select i1 %and1, i1 %c, i1 false
92 tail call void @llvm.assume(i1 %and)
; Assuming the negation of an or-ed condition splits into xor-true (not)
; assumes of each operand (see the two xor/assume pairs in the CHECK lines).
96 define i32 @can2(i1 %a, i1 %b, i1 %c) {
98 ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
99 ; CHECK-NEXT: call void @llvm.assume(i1 [[TMP1]])
100 ; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[B:%.*]], true
101 ; CHECK-NEXT: call void @llvm.assume(i1 [[TMP2]])
102 ; CHECK-NEXT: ret i32 5
106 tail call void @llvm.assume(i1 %w)
; Select-based (logical or) variant of @can2; the same xor-true split is expected.
110 define i32 @can2_logical(i1 %a, i1 %b, i1 %c) {
111 ; CHECK-LABEL: @can2_logical(
112 ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
113 ; CHECK-NEXT: call void @llvm.assume(i1 [[TMP1]])
114 ; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[B:%.*]], true
115 ; CHECK-NEXT: call void @llvm.assume(i1 [[TMP2]])
116 ; CHECK-NEXT: ret i32 5
118 %v = select i1 %a, i1 true, i1 %b
120 tail call void @llvm.assume(i1 %w)
; Known bits from assume(a & 7 == 1) fold the masked result to the constant 1.
124 define i32 @bar1(i32 %a) #0 {
125 ; CHECK-LABEL: @bar1(
126 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 7
127 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1
128 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
129 ; CHECK-NEXT: ret i32 1
131 %and1 = and i32 %a, 3
133 %cmp = icmp eq i32 %and, 1
134 tail call void @llvm.assume(i1 %cmp)
; Same fold as @bar1, but with the assume placed before the masking 'and'.
138 define i32 @bar2(i32 %a) #0 {
139 ; CHECK-LABEL: @bar2(
140 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 7
141 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1
142 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
143 ; CHECK-NEXT: ret i32 1
146 %cmp = icmp eq i32 %and, 1
147 tail call void @llvm.assume(i1 %cmp)
148 %and1 = and i32 %a, 3
; Like @bar1, but with unrelated assumes on %x and %y interleaved; the fold
; must still happen and the extra assumes must stay in place.
152 define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
153 ; CHECK-LABEL: @bar3(
155 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[X:%.*]])
156 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 7
157 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1
158 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
159 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[Y:%.*]])
160 ; CHECK-NEXT: ret i32 1
163 %and1 = and i32 %a, 3
165 ; Don't be fooled by other assumes around.
167 tail call void @llvm.assume(i1 %x)
170 %cmp = icmp eq i32 %and, 1
171 tail call void @llvm.assume(i1 %cmp)
173 tail call void @llvm.assume(i1 %y)
178 ; If we allow recursive known bits queries based on
179 ; assumptions, we could do better here:
180 ; a == b and a & 7 == 1, so b & 7 == 1, so b & 3 == 1, so return 1.
; Today the masked value is returned unfolded (CHECK: ret i32 [[AND1]]).
182 define i32 @known_bits_recursion_via_assumes(i32 %a, i32 %b) {
183 ; CHECK-LABEL: @known_bits_recursion_via_assumes(
185 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B:%.*]], 3
186 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 7
187 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1
188 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
189 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[A]], [[B]]
190 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP2]])
191 ; CHECK-NEXT: ret i32 [[AND1]]
194 %and1 = and i32 %b, 3
196 %cmp = icmp eq i32 %and, 1
197 tail call void @llvm.assume(i1 %cmp)
198 %cmp2 = icmp eq i32 %a, %b
199 tail call void @llvm.assume(i1 %cmp2)
; zext of an assumed-true compare folds to 1.
203 define i32 @icmp1(i32 %a) #0 {
204 ; CHECK-LABEL: @icmp1(
205 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
206 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
207 ; CHECK-NEXT: ret i32 1
209 %cmp = icmp sgt i32 %a, 5
210 tail call void @llvm.assume(i1 %cmp)
211 %conv = zext i1 %cmp to i32
; The xor-negated zext of an assumed-true compare folds to 0.
215 define i32 @icmp2(i32 %a) #0 {
216 ; CHECK-LABEL: @icmp2(
217 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
218 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
219 ; CHECK-NEXT: ret i32 0
221 %cmp = icmp sgt i32 %a, 5
222 tail call void @llvm.assume(i1 %cmp)
223 %t0 = zext i1 %cmp to i32
224 %lnot.ext = xor i32 %t0, 1
228 ; If the 'not' of a condition is known true, then the condition must be false.
; The assume itself is kept; the function's result folds to false.
230 define i1 @assume_not(i1 %cond) {
231 ; CHECK-LABEL: @assume_not(
232 ; CHECK-NEXT: [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
233 ; CHECK-NEXT: call void @llvm.assume(i1 [[NOTCOND]])
234 ; CHECK-NEXT: ret i1 false
236 %notcond = xor i1 %cond, true
237 call void @llvm.assume(i1 %notcond)
241 declare void @escape(i32* %a)
243 ; Canonicalize a nonnull assumption on a load into metadata form.
; Here the "nonnull"(%P) bundle assume is retained and the load is unchanged.
245 define i32 @bundle1(i32* %P) {
246 ; CHECK-LABEL: @bundle1(
247 ; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]) ]
248 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P]], align 4
249 ; CHECK-NEXT: ret i32 [[LOAD]]
251 tail call void @llvm.assume(i1 true) ["nonnull"(i32* %P)]
252 %load = load i32, i32* %P
; An assume carrying only an "ignore" bundle conveys nothing and is deleted.
256 define i32 @bundle2(i32* %P) {
257 ; CHECK-LABEL: @bundle2(
258 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[P:%.*]], align 4
259 ; CHECK-NEXT: ret i32 [[LOAD]]
261 tail call void @llvm.assume(i1 true) ["ignore"(i32* undef)]
262 %load = load i32, i32* %P
; The compare-based nonnull assume becomes !nonnull metadata on the load
; (and a nonnull attribute on the escaped use); the final null check folds
; to false.
266 define i1 @nonnull1(i32** %a) {
267 ; CHECK-LABEL: @nonnull1(
268 ; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8, !nonnull !6
269 ; CHECK-NEXT: tail call void @escape(i32* nonnull [[LOAD]])
270 ; CHECK-NEXT: ret i1 false
272 %load = load i32*, i32** %a
273 %cmp = icmp ne i32* %load, null
274 tail call void @llvm.assume(i1 %cmp)
275 tail call void @escape(i32* %load)
276 %rval = icmp eq i32* %load, null
280 ; Make sure the above canonicalization applies only
281 ; to pointer types. Doing otherwise would be illegal.
; The integer compare stays as a plain assume; the eq-0 result still folds.
283 define i1 @nonnull2(i32* %a) {
284 ; CHECK-LABEL: @nonnull2(
285 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A:%.*]], align 4
286 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
287 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
288 ; CHECK-NEXT: ret i1 false
290 %load = load i32, i32* %a
291 %cmp = icmp ne i32 %load, 0
292 tail call void @llvm.assume(i1 %cmp)
293 %rval = icmp eq i32 %load, 0
297 ; Make sure the above canonicalization does not trigger
298 ; if the assume is control dependent on something else
; Control-dependent assume: no !nonnull metadata may be added to the load.
300 define i1 @nonnull3(i32** %a, i1 %control) {
301 ; FIXME: in the BUNDLES version we could duplicate the load and keep the assume nonnull.
302 ; DEFAULT-LABEL: @nonnull3(
303 ; DEFAULT-NEXT: entry:
304 ; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
305 ; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
306 ; DEFAULT-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
308 ; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[CMP]])
309 ; DEFAULT-NEXT: ret i1 false
310 ; DEFAULT: not_taken:
311 ; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
312 ; DEFAULT-NEXT: ret i1 [[RVAL_2]]
314 ; BUNDLES-LABEL: @nonnull3(
315 ; BUNDLES-NEXT: entry:
316 ; BUNDLES-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
318 ; BUNDLES-NEXT: ret i1 false
319 ; BUNDLES: not_taken:
320 ; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
321 ; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
322 ; BUNDLES-NEXT: ret i1 [[RVAL_2]]
325 %load = load i32*, i32** %a
326 %cmp = icmp ne i32* %load, null
327 br i1 %control, label %taken, label %not_taken
329 tail call void @llvm.assume(i1 %cmp)
330 %rval = icmp eq i32* %load, null
333 %rval.2 = icmp sgt i32* %load, null
337 ; Make sure the above canonicalization does not trigger
338 ; if the path from the load to the assume is potentially
339 ; interrupted by an exception being thrown
; The possibly-throwing @escape call between load and assume blocks the
; metadata form; BUNDLES still converts to a "nonnull" operand bundle.
341 define i1 @nonnull4(i32** %a) {
342 ; DEFAULT-LABEL: @nonnull4(
343 ; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
344 ; DEFAULT-NEXT: tail call void @escape(i32* [[LOAD]])
345 ; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
346 ; DEFAULT-NEXT: tail call void @llvm.assume(i1 [[CMP]])
347 ; DEFAULT-NEXT: ret i1 false
349 ; BUNDLES-LABEL: @nonnull4(
350 ; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
351 ; BUNDLES-NEXT: tail call void @escape(i32* [[LOAD]])
352 ; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[LOAD]]) ]
353 ; BUNDLES-NEXT: ret i1 false
355 %load = load i32*, i32** %a
356 ;; This call may throw!
357 tail call void @escape(i32* %load)
358 %cmp = icmp ne i32* %load, null
359 tail call void @llvm.assume(i1 %cmp)
360 %rval = icmp eq i32* %load, null
; A sign test on ptrtoint(%load) proves the pointer is nonnull (high bit set),
; so the eq-null result still folds to false.
363 define i1 @nonnull5(i32** %a) {
364 ; CHECK-LABEL: @nonnull5(
365 ; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
366 ; CHECK-NEXT: tail call void @escape(i32* [[LOAD]])
367 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32* [[LOAD]], null
368 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
369 ; CHECK-NEXT: ret i1 false
371 %load = load i32*, i32** %a
372 ;; This call may throw!
373 tail call void @escape(i32* %load)
374 %integral = ptrtoint i32* %load to i64
375 %cmp = icmp slt i64 %integral, 0
376 tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
377 %rval = icmp eq i32* %load, null
381 ; PR35846 - https://bugs.llvm.org/show_bug.cgi?id=35846
; Conflicting assumption vs. computed known bits must simplify without
; crashing; note the first assume folds to assume(false) in the CHECK lines.
383 define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
384 ; CHECK-LABEL: @assumption_conflicts_with_known_bits(
385 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B:%.*]], 3
386 ; CHECK-NEXT: tail call void @llvm.assume(i1 false)
387 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[AND1]], 0
388 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP2]])
389 ; CHECK-NEXT: ret i32 0
391 %and1 = and i32 %b, 3
392 %B1 = lshr i32 %and1, %and1
393 %B3 = shl nuw nsw i32 %and1, %B1
394 %cmp = icmp eq i32 %B3, 1
395 tail call void @llvm.assume(i1 %cmp)
396 %cmp2 = icmp eq i32 %B1, %B3
397 tail call void @llvm.assume(i1 %cmp2)
401 ; PR37726 - https://bugs.llvm.org/show_bug.cgi?id=37726
402 ; There's a loophole in eliminating a redundant assumption when
403 ; we have conflicting assumptions. Verify that debuginfo doesn't
404 ; get in the way of the fold.
; Duplicate assume(%cmp1)/assume(%cmp2) pairs must dedupe even with
; llvm.dbg.value calls interleaved between them.
406 define void @debug_interference(i8 %x) {
407 ; CHECK-LABEL: @debug_interference(
408 ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0
409 ; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata [[META7:![0-9]+]], metadata !DIExpression()), !dbg [[DBG9:![0-9]+]]
410 ; CHECK-NEXT: tail call void @llvm.assume(i1 false)
411 ; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata [[META7]], metadata !DIExpression()), !dbg [[DBG9]]
412 ; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata [[META7]], metadata !DIExpression()), !dbg [[DBG9]]
413 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP2]])
414 ; CHECK-NEXT: ret void
416 %cmp1 = icmp eq i8 %x, 0
417 %cmp2 = icmp ne i8 %x, 0
418 tail call void @llvm.assume(i1 %cmp1)
419 tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
420 tail call void @llvm.assume(i1 %cmp1)
421 tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
422 tail call void @llvm.assume(i1 %cmp2)
423 tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
424 tail call void @llvm.assume(i1 %cmp2)
429 ; Does it ever make sense to peek through a bitcast of the icmp operand?
; Currently nothing folds: the shuffle, bitcast, compare, and assume are
; all retained as-is (negative test).
431 define i32 @PR40940(<4 x i8> %x) {
432 ; CHECK-LABEL: @PR40940(
433 ; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
434 ; CHECK-NEXT: [[T2:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
435 ; CHECK-NEXT: [[T3:%.*]] = icmp ult i32 [[T2]], 65536
436 ; CHECK-NEXT: call void @llvm.assume(i1 [[T3]])
437 ; CHECK-NEXT: ret i32 [[T2]]
439 %shuf = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
440 %t2 = bitcast <4 x i8> %shuf to i32
441 %t3 = icmp ult i32 %t2, 65536
442 call void @llvm.assume(i1 %t3)
; Variant of @nonnull3 where the taken-path result folds to true; in BUNDLES
; mode the load/compare sink into not_taken and the taken block needs no code.
446 define i1 @nonnull3A(i32** %a, i1 %control) {
447 ; DEFAULT-LABEL: @nonnull3A(
448 ; DEFAULT-NEXT: entry:
449 ; DEFAULT-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
450 ; DEFAULT-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
452 ; DEFAULT-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
453 ; DEFAULT-NEXT: call void @llvm.assume(i1 [[CMP]])
454 ; DEFAULT-NEXT: ret i1 true
455 ; DEFAULT: not_taken:
456 ; DEFAULT-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
457 ; DEFAULT-NEXT: ret i1 [[RVAL_2]]
459 ; BUNDLES-LABEL: @nonnull3A(
460 ; BUNDLES-NEXT: entry:
461 ; BUNDLES-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
463 ; BUNDLES-NEXT: ret i1 true
464 ; BUNDLES: not_taken:
465 ; BUNDLES-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
466 ; BUNDLES-NEXT: [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
467 ; BUNDLES-NEXT: ret i1 [[RVAL_2]]
470 %load = load i32*, i32** %a
471 %cmp = icmp ne i32* %load, null
472 br i1 %control, label %taken, label %not_taken
474 call void @llvm.assume(i1 %cmp)
477 call void @llvm.assume(i1 %cmp)
478 %rval.2 = icmp sgt i32* %load, null
; The bundle-carrying assume keeps %load and %cmp alive; per the CHECK lines
; they are expected to sink into the taken block along with the assume.
482 define i1 @nonnull3B(i32** %a, i1 %control) {
483 ; CHECK-LABEL: @nonnull3B(
485 ; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
487 ; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
488 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
489 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(i32* [[LOAD]]), "nonnull"(i1 [[CMP]]) ]
490 ; CHECK-NEXT: ret i1 true
492 ; CHECK-NEXT: ret i1 [[CONTROL]]
495 %load = load i32*, i32** %a
496 %cmp = icmp ne i32* %load, null
497 br i1 %control, label %taken, label %not_taken
499 call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
502 call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
508 define i1 @nonnull3C(i32** %a, i1 %control) {
509 ; CHECK-LABEL: @nonnull3C(
511 ; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
513 ; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
514 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
515 ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
516 ; CHECK-NEXT: br label [[EXIT:%.*]]
518 ; CHECK-NEXT: ret i1 [[CMP2]]
520 ; CHECK-NEXT: ret i1 [[CONTROL]]
523 %load = load i32*, i32** %a
524 %cmp = icmp ne i32* %load, null
525 br i1 %control, label %taken, label %not_taken
527 %cmp2 = call i1 @tmp1(i1 %cmp)
530 ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
531 call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
534 call void @llvm.assume(i1 %cmp)
538 define i1 @nonnull3D(i32** %a, i1 %control) {
539 ; CHECK-LABEL: @nonnull3D(
541 ; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
543 ; CHECK-NEXT: [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
544 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
545 ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
546 ; CHECK-NEXT: br label [[EXIT:%.*]]
548 ; CHECK-NEXT: ret i1 [[CMP2]]
550 ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef), "ignore"(i1 undef), "nonnull"(i1 [[CONTROL]]) ]
551 ; CHECK-NEXT: ret i1 [[CONTROL]]
554 %load = load i32*, i32** %a
555 %cmp = icmp ne i32* %load, null
556 br i1 %control, label %taken, label %not_taken
558 %cmp2 = call i1 @tmp1(i1 %cmp)
563 call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp), "nonnull"(i1 %control)]
; assume(true) carries no information and is simply removed.
568 define void @always_true_assumption() {
569 ; CHECK-LABEL: @always_true_assumption(
570 ; CHECK-NEXT: ret void
572 call void @llvm.assume(i1 true)
576 ; The alloca guarantees that the low bits of %a are zero because of alignment.
577 ; The assume says the opposite. Make sure we don't crash.
; The contradictory assumption folds to assume(false) in the CHECK lines.
579 define i64 @PR31809() {
580 ; CHECK-LABEL: @PR31809(
581 ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
582 ; CHECK-NEXT: [[T1:%.*]] = ptrtoint i32* [[A]] to i64
583 ; CHECK-NEXT: call void @llvm.assume(i1 false)
584 ; CHECK-NEXT: ret i64 [[T1]]
587 %t1 = ptrtoint i32* %a to i64
588 %cond = icmp eq i64 %t1, 3
589 call void @llvm.assume(i1 %cond)
593 ; Similar to above: there's no way to know which assumption is truthful,
594 ; so just don't crash.
; The first assumption folds to assume(false); the second is left intact.
596 define i8 @conflicting_assumptions(i8 %x){
597 ; CHECK-LABEL: @conflicting_assumptions(
598 ; CHECK-NEXT: call void @llvm.assume(i1 false)
599 ; CHECK-NEXT: [[COND2:%.*]] = icmp eq i8 [[X:%.*]], 4
600 ; CHECK-NEXT: call void @llvm.assume(i1 [[COND2]])
601 ; CHECK-NEXT: ret i8 5
604 %cond1 = icmp eq i8 %x, 3
605 call void @llvm.assume(i1 %cond1)
606 %cond2 = icmp eq i8 %x, 4
607 call void @llvm.assume(i1 %cond2)
611 ; Another case of conflicting assumptions. This would crash because we'd
612 ; try to set more known bits than existed in the known bits struct.
; The whole body is expected to fold down to `unreachable`.
614 define void @PR36270(i32 %b) {
615 ; CHECK-LABEL: @PR36270(
616 ; CHECK-NEXT: unreachable
618 %B7 = xor i32 -1, 2147483647
619 %and1 = and i32 %b, 3
620 %B12 = lshr i32 %B7, %and1
621 %C1 = icmp ult i32 %and1, %B12
622 tail call void @llvm.assume(i1 %C1)
623 %cmp2 = icmp eq i32 0, %B12
624 tail call void @llvm.assume(i1 %cmp2)
630 define i32 @unreachable_assume(i32 %x, i32 %y) {
631 ; CHECK-LABEL: @unreachable_assume(
633 ; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
634 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
635 ; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
636 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[OR]])
637 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X]], 1
638 ; CHECK-NEXT: br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
640 ; CHECK-NEXT: [[A:%.*]] = and i32 [[Y]], -2
641 ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i32 [[A]], 104
642 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP3]])
643 ; CHECK-NEXT: br label [[EXIT]]
645 ; CHECK-NEXT: unreachable
648 %cmp0 = icmp sgt i32 %x, 1
649 %cmp1 = icmp eq i32 %y, 1
650 %or = or i1 %cmp0, %cmp1
651 tail call void @llvm.assume(i1 %or)
652 %cmp2 = icmp eq i32 %x, 1
653 br i1 %cmp2, label %if, label %exit
657 %cmp3 = icmp ne i32 %a, 104
658 tail call void @llvm.assume(i1 %cmp3)
662 %cmp4 = icmp eq i32 %x, 2
663 tail call void @llvm.assume(i1 %cmp4)
667 define i32 @unreachable_assume_logical(i32 %x, i32 %y) {
668 ; CHECK-LABEL: @unreachable_assume_logical(
670 ; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
671 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
672 ; CHECK-NEXT: [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
673 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[OR]])
674 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X]], 1
675 ; CHECK-NEXT: br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
677 ; CHECK-NEXT: [[A:%.*]] = and i32 [[Y]], -2
678 ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i32 [[A]], 104
679 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP3]])
680 ; CHECK-NEXT: br label [[EXIT]]
682 ; CHECK-NEXT: unreachable
685 %cmp0 = icmp sgt i32 %x, 1
686 %cmp1 = icmp eq i32 %y, 1
687 %or = select i1 %cmp0, i1 true, i1 %cmp1
688 tail call void @llvm.assume(i1 %or)
689 %cmp2 = icmp eq i32 %x, 1
690 br i1 %cmp2, label %if, label %exit
694 %cmp3 = icmp ne i32 %a, 104
695 tail call void @llvm.assume(i1 %cmp3)
699 %cmp4 = icmp eq i32 %x, 2
700 tail call void @llvm.assume(i1 %cmp4)
704 define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, i32* %p) {
705 ; CHECK-LABEL: @unreachable_assumes_and_store(
707 ; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
708 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
709 ; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
710 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[OR]])
711 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X]], 1
712 ; CHECK-NEXT: br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
714 ; CHECK-NEXT: [[A:%.*]] = and i32 [[Y]], -2
715 ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i32 [[A]], 104
716 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP3]])
717 ; CHECK-NEXT: br label [[EXIT]]
719 ; CHECK-NEXT: unreachable
722 %cmp0 = icmp sgt i32 %x, 1
723 %cmp1 = icmp eq i32 %y, 1
724 %or = or i1 %cmp0, %cmp1
725 tail call void @llvm.assume(i1 %or)
726 %cmp2 = icmp eq i32 %x, 1
727 br i1 %cmp2, label %if, label %exit
731 %cmp3 = icmp ne i32 %a, 104
732 tail call void @llvm.assume(i1 %cmp3)
736 %cmp4 = icmp eq i32 %x, 2
737 tail call void @llvm.assume(i1 %cmp4)
738 %cmp5 = icmp ugt i32 %y, 42
739 tail call void @llvm.assume(i1 %cmp5)
740 store i32 %x, i32* %p
744 define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, i32* %p) {
745 ; CHECK-LABEL: @unreachable_assumes_and_store_logical(
747 ; CHECK-NEXT: [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
748 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
749 ; CHECK-NEXT: [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
750 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[OR]])
751 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X]], 1
752 ; CHECK-NEXT: br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
754 ; CHECK-NEXT: [[A:%.*]] = and i32 [[Y]], -2
755 ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i32 [[A]], 104
756 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP3]])
757 ; CHECK-NEXT: br label [[EXIT]]
759 ; CHECK-NEXT: unreachable
762 %cmp0 = icmp sgt i32 %x, 1
763 %cmp1 = icmp eq i32 %y, 1
764 %or = select i1 %cmp0, i1 true, i1 %cmp1
765 tail call void @llvm.assume(i1 %or)
766 %cmp2 = icmp eq i32 %x, 1
767 br i1 %cmp2, label %if, label %exit
771 %cmp3 = icmp ne i32 %a, 104
772 tail call void @llvm.assume(i1 %cmp3)
776 %cmp4 = icmp eq i32 %x, 2
777 tail call void @llvm.assume(i1 %cmp4)
778 %cmp5 = icmp ugt i32 %y, 42
779 tail call void @llvm.assume(i1 %cmp5)
780 store i32 %x, i32* %p
784 define void @canonicalize_assume(i32* %0) {
785 ; DEFAULT-LABEL: @canonicalize_assume(
786 ; DEFAULT-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 2
787 ; DEFAULT-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to i8*
788 ; DEFAULT-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP3]], i64 16) ]
789 ; DEFAULT-NEXT: ret void
791 ; BUNDLES-LABEL: @canonicalize_assume(
792 ; BUNDLES-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP0:%.*]], i64 8) ]
793 ; BUNDLES-NEXT: ret void
795 %2 = getelementptr inbounds i32, i32* %0, i64 2
796 %3 = bitcast i32* %2 to i8*
797 call void @llvm.assume(i1 true) [ "align"(i8* %3, i64 16) ]
801 declare void @llvm.dbg.value(metadata, metadata, metadata)
804 !llvm.module.flags = !{!5, !6, !7, !8}
806 !0 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "Me", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: null, retainedTypes: null, imports: null)
807 !1 = !DILocalVariable(name: "", arg: 1, scope: !2, file: null, line: 1, type: null)
808 !2 = distinct !DISubprogram(name: "debug", linkageName: "debug", scope: null, file: null, line: 0, type: null, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
809 !3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
810 !5 = !{i32 2, !"Dwarf Version", i32 4}
811 !6 = !{i32 2, !"Debug Info Version", i32 3}
812 !7 = !{i32 1, !"wchar_size", i32 4}
813 !8 = !{i32 7, !"PIC Level", i32 2}
814 !9 = !DILocation(line: 0, column: 0, scope: !2)
817 attributes #0 = { nounwind uwtable }
818 attributes #1 = { nounwind }