1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -jump-threading -dce -S | FileCheck %s
4 declare void @llvm.experimental.guard(i1, ...)
; The branch condition (%a < 10) implies the guard condition (%a < 20) on the
; taken path, so after jump threading the guard is eliminated on the true split
; and retained only on the false split (as the autogenerated assertions show).
9 define i32 @branch_implies_guard(i32 %a) {
10 ; CHECK-LABEL: @branch_implies_guard(
11 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[A:%.*]], 10
12 ; CHECK-NEXT: br i1 [[COND]], label [[T1_SPLIT:%.*]], label [[F1_SPLIT:%.*]]
14 ; CHECK-NEXT: [[V1:%.*]] = call i32 @f1()
15 ; CHECK-NEXT: [[RETVAL3:%.*]] = add i32 [[V1]], 10
16 ; CHECK-NEXT: br label [[MERGE:%.*]]
18 ; CHECK-NEXT: [[V2:%.*]] = call i32 @f2()
19 ; CHECK-NEXT: [[RETVAL1:%.*]] = add i32 [[V2]], 10
20 ; CHECK-NEXT: [[CONDGUARD2:%.*]] = icmp slt i32 [[A]], 20
21 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CONDGUARD2]]) [ "deopt"() ]
22 ; CHECK-NEXT: br label [[MERGE]]
24 ; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[RETVAL3]], [[T1_SPLIT]] ], [ [[RETVAL1]], [[F1_SPLIT]] ]
25 ; CHECK-NEXT: ret i32 [[TMP1]]
; Input IR (pre-pass):
27 %cond = icmp slt i32 %a, 10
28 br i1 %cond, label %T1, label %F1
39 %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
40 %retVal = add i32 %retPhi, 10
41 %condGuard = icmp slt i32 %a, 20
42 call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
; Here the FALSE edge implies the guard: not(%a < 20) => %a >= 20 => (%a > 10),
; so after threading the guard is dropped on the false split and retained only
; on the true split (where %a < 20 does not decide %a > 10).
46 define i32 @not_branch_implies_guard(i32 %a) {
47 ; CHECK-LABEL: @not_branch_implies_guard(
48 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[A:%.*]], 20
49 ; CHECK-NEXT: br i1 [[COND]], label [[T1_SPLIT:%.*]], label [[F1_SPLIT:%.*]]
51 ; CHECK-NEXT: [[V1:%.*]] = call i32 @f1()
52 ; CHECK-NEXT: [[RETVAL1:%.*]] = add i32 [[V1]], 10
53 ; CHECK-NEXT: [[CONDGUARD2:%.*]] = icmp sgt i32 [[A]], 10
54 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CONDGUARD2]]) [ "deopt"() ]
55 ; CHECK-NEXT: br label [[MERGE:%.*]]
57 ; CHECK-NEXT: [[V2:%.*]] = call i32 @f2()
58 ; CHECK-NEXT: [[RETVAL3:%.*]] = add i32 [[V2]], 10
59 ; CHECK-NEXT: br label [[MERGE]]
61 ; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[RETVAL3]], [[F1_SPLIT]] ], [ [[RETVAL1]], [[T1_SPLIT]] ]
62 ; CHECK-NEXT: ret i32 [[TMP1]]
; Input IR (pre-pass):
64 %cond = icmp slt i32 %a, 20
65 br i1 %cond, label %T1, label %F1
76 %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
77 %retVal = add i32 %retPhi, 10
78 %condGuard = icmp sgt i32 %a, 10
79 call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
; The guard range (%a < 10) overlaps the branch range (%a < 20) but neither
; branch edge implies the guard condition, so no threading happens: the guard
; stays in the merge block after the phi, as the assertions verify.
83 define i32 @branch_overlaps_guard(i32 %a) {
84 ; CHECK-LABEL: @branch_overlaps_guard(
85 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[A:%.*]], 20
86 ; CHECK-NEXT: br i1 [[COND]], label [[T1:%.*]], label [[F1:%.*]]
88 ; CHECK-NEXT: [[V1:%.*]] = call i32 @f1()
89 ; CHECK-NEXT: br label [[MERGE:%.*]]
91 ; CHECK-NEXT: [[V2:%.*]] = call i32 @f2()
92 ; CHECK-NEXT: br label [[MERGE]]
94 ; CHECK-NEXT: [[RETPHI:%.*]] = phi i32 [ [[V1]], [[T1]] ], [ [[V2]], [[F1]] ]
95 ; CHECK-NEXT: [[RETVAL:%.*]] = add i32 [[RETPHI]], 10
96 ; CHECK-NEXT: [[CONDGUARD:%.*]] = icmp slt i32 [[A]], 10
97 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CONDGUARD]]) [ "deopt"() ]
98 ; CHECK-NEXT: ret i32 [[RETVAL]]
; Input IR (pre-pass):
100 %cond = icmp slt i32 %a, 20
101 br i1 %cond, label %T1, label %F1
112 %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
113 %retVal = add i32 %retPhi, 10
114 %condGuard = icmp slt i32 %a, 10
115 call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
; The guard range (%a > 20) is disjoint from the branch range (%a < 10);
; neither edge implies the guard condition, so again no threading: the guard
; remains in the merge block unchanged.
119 define i32 @branch_doesnt_overlap_guard(i32 %a) {
120 ; CHECK-LABEL: @branch_doesnt_overlap_guard(
121 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[A:%.*]], 10
122 ; CHECK-NEXT: br i1 [[COND]], label [[T1:%.*]], label [[F1:%.*]]
124 ; CHECK-NEXT: [[V1:%.*]] = call i32 @f1()
125 ; CHECK-NEXT: br label [[MERGE:%.*]]
127 ; CHECK-NEXT: [[V2:%.*]] = call i32 @f2()
128 ; CHECK-NEXT: br label [[MERGE]]
130 ; CHECK-NEXT: [[RETPHI:%.*]] = phi i32 [ [[V1]], [[T1]] ], [ [[V2]], [[F1]] ]
131 ; CHECK-NEXT: [[RETVAL:%.*]] = add i32 [[RETPHI]], 10
132 ; CHECK-NEXT: [[CONDGUARD:%.*]] = icmp sgt i32 [[A]], 20
133 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CONDGUARD]]) [ "deopt"() ]
134 ; CHECK-NEXT: ret i32 [[RETVAL]]
; Input IR (pre-pass):
136 %cond = icmp slt i32 %a, 10
137 br i1 %cond, label %T1, label %F1
148 %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
149 %retVal = add i32 %retPhi, 10
150 %condGuard = icmp sgt i32 %a, 20
151 call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
; The merge block is fed by two case edges of a switch rather than the two
; sides of a branch diamond, so the guard-threading transform does not apply;
; the assertions show the CFG and the guard on %cond1 are left as-is.
155 define i32 @not_a_diamond1(i32 %a, i1 %cond1) {
156 ; CHECK-LABEL: @not_a_diamond1(
157 ; CHECK-NEXT: br i1 [[COND1:%.*]], label [[PRED:%.*]], label [[EXIT:%.*]]
159 ; CHECK-NEXT: switch i32 [[A:%.*]], label [[EXIT]] [
160 ; CHECK-NEXT: i32 10, label [[MERGE:%.*]]
161 ; CHECK-NEXT: i32 20, label [[MERGE]]
164 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND1]]) [ "deopt"() ]
165 ; CHECK-NEXT: br label [[EXIT]]
167 ; CHECK-NEXT: ret i32 [[A]]
; Input IR (pre-pass):
169 br i1 %cond1, label %Pred, label %Exit
172 switch i32 %a, label %Exit [
178 call void(i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
; Variant of not_a_diamond1: the guard precedes the switch instead of sitting
; in the switch-fed merge block. Still not a diamond, so nothing is threaded
; and the guard on %cond1 survives unchanged.
185 define void @not_a_diamond2(i32 %a, i1 %cond1) {
186 ; CHECK-LABEL: @not_a_diamond2(
188 ; CHECK-NEXT: switch i32 [[A:%.*]], label [[EXIT:%.*]] [
189 ; CHECK-NEXT: i32 10, label [[MERGE:%.*]]
190 ; CHECK-NEXT: i32 20, label [[MERGE]]
193 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND1:%.*]]) [ "deopt"() ]
194 ; CHECK-NEXT: ret void
196 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
201 call void(i1, ...) @llvm.experimental.guard(i1 %cond1)[ "deopt"() ]
205 switch i32 %a, label %Exit [
217 declare void @never_called(i1)
219 ; LVI uses the guard to identify the value of %c2 in the branch as true, so we
220 ; cannot replace that guard with guard(true & c1).
; The widened guard on (%c1 & %c2) must NOT be folded away. Past the guard,
; %c2 is known true, so the branch on %c2 folds and the use of %c2 in the
; @never_called argument becomes the literal true — but the guard itself stays.
221 define void @dont_fold_guard(i8* %addr, i32 %i, i32 %length) {
222 ; CHECK-LABEL: @dont_fold_guard(
224 ; CHECK-NEXT: [[C1:%.*]] = icmp ult i32 [[I:%.*]], [[LENGTH:%.*]]
225 ; CHECK-NEXT: [[C2:%.*]] = icmp eq i32 [[I]], 0
226 ; CHECK-NEXT: [[WIDE_CHK:%.*]] = and i1 [[C1]], [[C2]]
227 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WIDE_CHK]]) [ "deopt"() ]
228 ; CHECK-NEXT: call void @never_called(i1 true)
229 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
231 %c1 = icmp ult i32 %i, %length
232 %c2 = icmp eq i32 %i, 0
233 %wide.chk = and i1 %c1, %c2
234 call void(i1, ...) @llvm.experimental.guard(i1 %wide.chk) [ "deopt"() ]
235 br i1 %c2, label %BB1, label %BB2
238 call void @never_called(i1 %c2)
245 declare void @dummy(i1) nounwind willreturn
246 ; Same as dont_fold_guard, but there's a use immediately after the guard and
247 ; before the branch. We can fold that use.
; Like dont_fold_guard, but with a use of %c2 (the @dummy call) between the
; guard and the branch. That use is dominated by the guard, so it folds to
; true as well, while the widened guard itself is preserved.
248 define void @dont_fold_guard2(i8* %addr, i32 %i, i32 %length) {
249 ; CHECK-LABEL: @dont_fold_guard2(
251 ; CHECK-NEXT: [[C1:%.*]] = icmp ult i32 [[I:%.*]], [[LENGTH:%.*]]
252 ; CHECK-NEXT: [[C2:%.*]] = icmp eq i32 [[I]], 0
253 ; CHECK-NEXT: [[WIDE_CHK:%.*]] = and i1 [[C1]], [[C2]]
254 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WIDE_CHK]]) [ "deopt"() ]
255 ; CHECK-NEXT: call void @dummy(i1 true)
256 ; CHECK-NEXT: call void @never_called(i1 true)
257 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
259 %c1 = icmp ult i32 %i, %length
260 %c2 = icmp eq i32 %i, 0
261 %wide.chk = and i1 %c1, %c2
262 call void(i1, ...) @llvm.experimental.guard(i1 %wide.chk) [ "deopt"() ]
263 call void @dummy(i1 %c2)
264 br i1 %c2, label %BB1, label %BB2
267 call void @never_called(i1 %c2)
274 ; Same as dont_fold_guard, but the condition %cmp is not an instruction.
275 ; We cannot fold the guard under any circumstance.
276 ; FIXME: We can merge unreachableBB2 into not_zero.
; The guarded condition %cmp is a function argument, not an instruction, so
; nothing can be folded: the guard, the branch, and the use of %cmp must all
; survive exactly as written.
277 define void @dont_fold_guard3(i8* %addr, i1 %cmp, i32 %i, i32 %length) {
278 ; CHECK-LABEL: @dont_fold_guard3(
279 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP:%.*]]) [ "deopt"() ]
280 ; CHECK-NEXT: br i1 [[CMP]], label [[BB1:%.*]], label [[BB2:%.*]]
282 ; CHECK-NEXT: call void @never_called(i1 [[CMP]])
283 ; CHECK-NEXT: ret void
285 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
287 call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
288 br i1 %cmp, label %BB1, label %BB2
291 call void @never_called(i1 %cmp)
299 ; Same as dont_fold_guard, but uses a switch instead of a branch.
300 ; This triggers `ProcessThreadableEdges` in the jump-threading source code.
; Switch-based variant: after the guard, %cmp is known true, so the switch on
; %cmp and the uses of %cmp (@dummy, @f arguments) fold to true while the
; guard itself is kept. The guard block's successors are threaded (L0 -> L2).
301 define void @dont_fold_guard4(i1 %cmp1, i32 %i) nounwind {
302 ; CHECK-LABEL: @dont_fold_guard4(
304 ; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[L2:%.*]], label [[L3:%.*]]
306 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I:%.*]], 0
307 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
308 ; CHECK-NEXT: call void @dummy(i1 true)
309 ; CHECK-NEXT: call void @f(i1 true)
310 ; CHECK-NEXT: ret void
312 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
315 br i1 %cmp1, label %L0, label %L3
317 %cmp = icmp eq i32 %i, 0
318 call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
319 call void @dummy(i1 %cmp)
320 switch i1 %cmp, label %L3 [
328 call void @f(i1 %cmp)
334 ; Make sure that we don't PRE a non-speculable load across a guard.
; The load from %p is not known speculable (no dereferenceable/noalias info),
; so it must NOT be hoisted (PRE'd) above the guard: the assertions require
; the load to stay inside the loop, after the guard call.
335 define void @unsafe_pre_across_guard(i8* %p, i1 %load.is.valid) {
336 ; CHECK-LABEL: @unsafe_pre_across_guard(
338 ; CHECK-NEXT: br label [[LOOP:%.*]]
340 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[LOAD_IS_VALID:%.*]]) [ "deopt"() ]
341 ; CHECK-NEXT: [[LOADED:%.*]] = load i8, i8* [[P:%.*]], align 1
342 ; CHECK-NEXT: [[CONTINUE:%.*]] = icmp eq i8 [[LOADED]], 0
343 ; CHECK-NEXT: br i1 [[CONTINUE]], label [[EXIT:%.*]], label [[LOOP]]
345 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
350 loop: ; preds = %loop, %entry
351 call void (i1, ...) @llvm.experimental.guard(i1 %load.is.valid) [ "deopt"() ]
352 %loaded = load i8, i8* %p
353 %continue = icmp eq i8 %loaded, 0
354 br i1 %continue, label %exit, label %loop
356 exit: ; preds = %loop
360 ; Make sure that we can safely PRE a speculable load across a guard.
; Here %p is noalias/readonly/dereferenceable(8) and the function is
; nofree/nosync, so the load IS speculable: PRE hoists it into the entry block
; ([[LOADED_PR]]) and the loop carries it through a phi across the guard.
361 define void @safe_pre_across_guard(i8* noalias nocapture readonly dereferenceable(8) %p, i1 %load.is.valid) nofree nosync {
362 ; CHECK-LABEL: @safe_pre_across_guard(
364 ; CHECK-NEXT: [[LOADED_PR:%.*]] = load i8, i8* [[P:%.*]], align 1
365 ; CHECK-NEXT: br label [[LOOP:%.*]]
367 ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[LOADED]], [[LOOP]] ], [ [[LOADED_PR]], [[ENTRY:%.*]] ]
368 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[LOAD_IS_VALID:%.*]]) [ "deopt"() ]
369 ; CHECK-NEXT: [[CONTINUE:%.*]] = icmp eq i8 [[LOADED]], 0
370 ; CHECK-NEXT: br i1 [[CONTINUE]], label [[EXIT:%.*]], label [[LOOP]]
372 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
378 loop: ; preds = %loop, %entry
379 call void (i1, ...) @llvm.experimental.guard(i1 %load.is.valid) [ "deopt"() ]
380 %loaded = load i8, i8* %p
381 %continue = icmp eq i8 %loaded, 0
382 br i1 %continue, label %exit, label %loop
384 exit: ; preds = %loop
388 ; Make sure that we don't PRE a non-speculable load across a call which may
389 ; alias with the load.
; Same PRE test, but with an opaque call (@f1) instead of a guard: the call
; may write memory aliasing %p, so the load must stay in the loop after the
; call, as the assertions require.
390 define void @unsafe_pre_across_call(i8* %p) {
391 ; CHECK-LABEL: @unsafe_pre_across_call(
393 ; CHECK-NEXT: br label [[LOOP:%.*]]
395 ; CHECK-NEXT: [[TMP0:%.*]] = call i32 @f1()
396 ; CHECK-NEXT: [[LOADED:%.*]] = load i8, i8* [[P:%.*]], align 1
397 ; CHECK-NEXT: [[CONTINUE:%.*]] = icmp eq i8 [[LOADED]], 0
398 ; CHECK-NEXT: br i1 [[CONTINUE]], label [[EXIT:%.*]], label [[LOOP]]
400 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
405 loop: ; preds = %loop, %entry
407 %loaded = load i8, i8* %p
408 %continue = icmp eq i8 %loaded, 0
409 br i1 %continue, label %exit, label %loop
411 exit: ; preds = %loop
415 ; Make sure that we can safely PRE a speculable load across a call.
; With %p noalias/readonly/dereferenceable(8) and nofree/nosync, the call
; cannot invalidate the load, so PRE is legal: the load is hoisted to entry
; and flows through a loop phi across the @f1 call.
416 define void @safe_pre_across_call(i8* noalias nocapture readonly dereferenceable(8) %p) nofree nosync {
417 ; CHECK-LABEL: @safe_pre_across_call(
419 ; CHECK-NEXT: [[LOADED_PR:%.*]] = load i8, i8* [[P:%.*]], align 1
420 ; CHECK-NEXT: br label [[LOOP:%.*]]
422 ; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[LOADED]], [[LOOP]] ], [ [[LOADED_PR]], [[ENTRY:%.*]] ]
423 ; CHECK-NEXT: [[TMP0:%.*]] = call i32 @f1()
424 ; CHECK-NEXT: [[CONTINUE:%.*]] = icmp eq i8 [[LOADED]], 0
425 ; CHECK-NEXT: br i1 [[CONTINUE]], label [[EXIT:%.*]], label [[LOOP]]
427 ; CHECK-NEXT: ret void
; Input IR (pre-pass):
433 loop: ; preds = %loop, %entry
435 %loaded = load i8, i8* %p
436 %continue = icmp eq i8 %loaded, 0
437 br i1 %continue, label %exit, label %loop
439 exit: ; preds = %loop