; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes='loop-mssa(loop-rotate,licm),instcombine,indvars,loop-unroll' -S %s | FileCheck %s
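
; The pass order in the RUN line matters: loop-rotate, licm, and instcombine
; first reshape the loops, and the later SCEV users (indvars, loop-unroll)
; then query SCEV over the rewritten IR, which is when the stale disposition
; described below is observed.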

; PR18361: ScalarEvolution::getAddRecExpr():
;          Assertion `isLoopInvariant(Operands[i],...
;
; After a series of loop optimizations, SCEV's LoopDispositions grow stale.
; In particular, LoopSimplify hoists %cmp4, resulting in this SCEV for %add:
; {(zext i1 %cmp4 to i32),+,1}<nw><%for.cond1.preheader>
;
; When recomputing the SCEV for %ashr (%conv6 below), we truncate the
; operands to get:
; (zext i1 %cmp4 to i16)
;
; This SCEV was never mapped to a value, so it was never invalidated. Its
; loop disposition is still marked as non-loop-invariant, which is
; inconsistent with the AddRec.
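
; In the IR below, the relevant def-use chain is:
;   %cmp4 -> %conv (zext) -> %add -> %sext (shl 16) -> %conv6 (ashr 16)
; where the shl/ashr-by-16 pair is the sign-extension idiom that makes SCEV
; truncate the AddRec's operands as quoted above.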

target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx"

@d = common global i32 0, align 4
@a = common global i32 0, align 4
@c = common global i32 0, align 4
@b = common global i32 0, align 4

; Check that the def-use chain that leads to the bad SCEV is still
; there.

define void @foo() {
; CHECK-LABEL: define void @foo() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store i32 0, ptr @d, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @c, align 4
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader:
; CHECK-NEXT:    br label [[FOR_BODY3:%.*]]
; CHECK:       for.body3:
; CHECK-NEXT:    store i32 1, ptr @a, align 4
; CHECK-NEXT:    store i32 1, ptr @d, align 4
; CHECK-NEXT:    [[CMP4_LE_LE_INV:%.*]] = icmp sgt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[CMP4_LE_LE_INV]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    store i32 0, ptr @b, align 4
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.inc7, %entry
  %storemerge = phi i32 [ 0, %entry ], [ %inc8, %for.inc7 ]
  %f.0 = phi i32 [ undef, %entry ], [ %f.1, %for.inc7 ]
  store i32 %storemerge, ptr @d, align 4
  %cmp = icmp slt i32 %storemerge, 1
  br i1 %cmp, label %for.cond1, label %for.end9

for.cond1:                                        ; preds = %for.cond, %for.body3
  %storemerge1 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
  %f.1 = phi i32 [ %xor, %for.body3 ], [ %f.0, %for.cond ]
  store i32 %storemerge1, ptr @a, align 4
  %cmp2 = icmp slt i32 %storemerge1, 1
  br i1 %cmp2, label %for.body3, label %for.inc7

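; This is the block the top-of-file note refers to: %cmp4 is hoisted by the
; loop passes, and %conv/%add/%sext/%conv6 form the zext/add/shl/ashr chain
; whose SCEV is the AddRec quoted above.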
for.body3:                                        ; preds = %for.cond1
  %0 = load i32, ptr @c, align 4
  %cmp4 = icmp sge i32 %storemerge1, %0
  %conv = zext i1 %cmp4 to i32
  %1 = load i32, ptr @d, align 4
  %add = add nsw i32 %conv, %1
  %sext = shl i32 %add, 16
  %conv6 = ashr exact i32 %sext, 16
  %xor = xor i32 %conv6, 1
  %inc = add nsw i32 %storemerge1, 1
  br label %for.cond1

for.inc7:                                         ; preds = %for.cond1
  %2 = load i32, ptr @d, align 4
  %inc8 = add nsw i32 %2, 1
  br label %for.cond

for.end9:                                         ; preds = %for.cond
  %cmp10 = icmp sgt i32 %f.0, 0
  br i1 %cmp10, label %if.then, label %if.end

if.then:                                          ; preds = %for.end9
  store i32 0, ptr @b, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %for.end9
  ret void
}