Recommit "rL366894: [yaml2obj] - Allow custom fields for the SHT_UNDEF sections."
[llvm-complete.git] / unittests / Analysis / ScalarEvolutionTest.cpp
blobfb8f6689ca7c2061f4e992bcf7914764184a00b2
//===- ScalarEvolutionsTest.cpp - ScalarEvolution unit tests --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #include "llvm/ADT/SmallVector.h"
10 #include "llvm/Analysis/AssumptionCache.h"
11 #include "llvm/Analysis/LoopInfo.h"
12 #include "llvm/Analysis/ScalarEvolutionExpander.h"
13 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
14 #include "llvm/Analysis/TargetLibraryInfo.h"
15 #include "llvm/AsmParser/Parser.h"
16 #include "llvm/IR/Constants.h"
17 #include "llvm/IR/Dominators.h"
18 #include "llvm/IR/GlobalVariable.h"
19 #include "llvm/IR/IRBuilder.h"
20 #include "llvm/IR/InstIterator.h"
21 #include "llvm/IR/LLVMContext.h"
22 #include "llvm/IR/LegacyPassManager.h"
23 #include "llvm/IR/Module.h"
24 #include "llvm/IR/Verifier.h"
25 #include "llvm/Support/SourceMgr.h"
26 #include "gtest/gtest.h"
28 namespace llvm {
29 namespace {
31 // We use this fixture to ensure that we clean up ScalarEvolution before
32 // deleting the PassManager.
33 class ScalarEvolutionsTest : public testing::Test {
34 protected:
35 LLVMContext Context;
36 Module M;
37 TargetLibraryInfoImpl TLII;
38 TargetLibraryInfo TLI;
40 std::unique_ptr<AssumptionCache> AC;
41 std::unique_ptr<DominatorTree> DT;
42 std::unique_ptr<LoopInfo> LI;
44 ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}
46 ScalarEvolution buildSE(Function &F) {
47 AC.reset(new AssumptionCache(F));
48 DT.reset(new DominatorTree(F));
49 LI.reset(new LoopInfo(*DT));
50 return ScalarEvolution(F, TLI, *AC, *DT, *LI);
53 void runWithSE(
54 Module &M, StringRef FuncName,
55 function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
56 auto *F = M.getFunction(FuncName);
57 ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
58 ScalarEvolution SE = buildSE(*F);
59 Test(*F, *LI, SE);
63 TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
64 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
65 std::vector<Type *>(), false);
66 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
67 BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
68 ReturnInst::Create(Context, nullptr, BB);
70 Type *Ty = Type::getInt1Ty(Context);
71 Constant *Init = Constant::getNullValue(Ty);
72 Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V0");
73 Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V1");
74 Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V2");
76 ScalarEvolution SE = buildSE(*F);
78 const SCEV *S0 = SE.getSCEV(V0);
79 const SCEV *S1 = SE.getSCEV(V1);
80 const SCEV *S2 = SE.getSCEV(V2);
82 const SCEV *P0 = SE.getAddExpr(S0, S0);
83 const SCEV *P1 = SE.getAddExpr(S1, S1);
84 const SCEV *P2 = SE.getAddExpr(S2, S2);
86 const SCEVMulExpr *M0 = cast<SCEVMulExpr>(P0);
87 const SCEVMulExpr *M1 = cast<SCEVMulExpr>(P1);
88 const SCEVMulExpr *M2 = cast<SCEVMulExpr>(P2);
90 EXPECT_EQ(cast<SCEVConstant>(M0->getOperand(0))->getValue()->getZExtValue(),
91 2u);
92 EXPECT_EQ(cast<SCEVConstant>(M1->getOperand(0))->getValue()->getZExtValue(),
93 2u);
94 EXPECT_EQ(cast<SCEVConstant>(M2->getOperand(0))->getValue()->getZExtValue(),
95 2u);
97 // Before the RAUWs, these are all pointing to separate values.
98 EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
99 EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V1);
100 EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V2);
102 // Do some RAUWs.
103 V2->replaceAllUsesWith(V1);
104 V1->replaceAllUsesWith(V0);
106 // After the RAUWs, these should all be pointing to V0.
107 EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
108 EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V0);
109 EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V0);
112 TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
113 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
114 std::vector<Type *>(), false);
115 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
116 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
117 BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
118 BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
119 BranchInst::Create(LoopBB, EntryBB);
120 BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
121 LoopBB);
122 ReturnInst::Create(Context, nullptr, ExitBB);
123 auto *Ty = Type::getInt32Ty(Context);
124 auto *PN = PHINode::Create(Ty, 2, "", &*LoopBB->begin());
125 PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
126 PN->addIncoming(UndefValue::get(Ty), LoopBB);
127 ScalarEvolution SE = buildSE(*F);
128 auto *S1 = SE.getSCEV(PN);
129 auto *S2 = SE.getSCEV(PN);
130 auto *ZeroConst = SE.getConstant(Ty, 0);
132 // At some point, only the first call to getSCEV returned the simplified
133 // SCEVConstant and later calls just returned a SCEVUnknown referencing the
134 // PHI node.
135 EXPECT_EQ(S1, ZeroConst);
136 EXPECT_EQ(S1, S2);
139 TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
140 // It is to test the fix for PR30213. It exercises the branch in scev
141 // expansion when the value in ValueOffsetPair is a ptr and the offset
142 // is not divisible by the elem type size of value.
143 auto *I8Ty = Type::getInt8Ty(Context);
144 auto *I8PtrTy = Type::getInt8PtrTy(Context);
145 auto *I32Ty = Type::getInt32Ty(Context);
146 auto *I32PtrTy = Type::getInt32PtrTy(Context);
147 FunctionType *FTy =
148 FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
149 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
150 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
151 BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
152 BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
153 BranchInst::Create(LoopBB, EntryBB);
154 ReturnInst::Create(Context, nullptr, ExitBB);
156 // loop: ; preds = %loop, %entry
157 // %alloca = alloca i32
158 // %gep0 = getelementptr i32, i32* %alloca, i32 1
159 // %bitcast1 = bitcast i32* %gep0 to i8*
160 // %gep1 = getelementptr i8, i8* %bitcast1, i32 1
161 // %gep2 = getelementptr i8, i8* undef, i32 1
162 // %cmp = icmp ult i8* undef, %bitcast1
163 // %select = select i1 %cmp, i8* %gep1, i8* %gep2
164 // %bitcast2 = bitcast i8* %select to i32*
165 // br i1 undef, label %loop, label %exit
167 const DataLayout &DL = F->getParent()->getDataLayout();
168 BranchInst *Br = BranchInst::Create(
169 LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
170 AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
171 "alloca", Br);
172 ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
173 GetElementPtrInst *Gep0 =
174 GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
175 CastInst *CastA =
176 CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
177 GetElementPtrInst *Gep1 =
178 GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
179 GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
180 I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
181 CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
182 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
183 SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
184 CastInst *CastB =
185 CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
187 ScalarEvolution SE = buildSE(*F);
188 auto *S = SE.getSCEV(CastB);
189 SCEVExpander Exp(SE, M.getDataLayout(), "expander");
190 Value *V =
191 Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);
193 // Expect the expansion code contains:
194 // %0 = bitcast i32* %bitcast2 to i8*
195 // %uglygep = getelementptr i8, i8* %0, i64 -1
196 // %1 = bitcast i8* %uglygep to i32*
197 EXPECT_TRUE(isa<BitCastInst>(V));
198 Instruction *Gep = cast<Instruction>(V)->getPrevNode();
199 EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
200 EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
201 EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
202 EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
205 static Instruction *getInstructionByName(Function &F, StringRef Name) {
206 for (auto &I : instructions(F))
207 if (I.getName() == Name)
208 return &I;
209 llvm_unreachable("Expected to find instruction!");
212 TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
213 LLVMContext C;
214 SMDiagnostic Err;
215 std::unique_ptr<Module> M = parseAssemblyString(
216 "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
218 "@var_0 = external global i32, align 4"
219 "@var_1 = external global i32, align 4"
220 "@var_2 = external global i32, align 4"
222 "declare i32 @unknown(i32, i32, i32)"
224 "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
225 " local_unnamed_addr { "
226 "entry: "
227 " %entrycond = icmp sgt i32 %n, 0 "
228 " br i1 %entrycond, label %loop.ph, label %for.end "
230 "loop.ph: "
231 " %a = load i32, i32* %A, align 4 "
232 " %b = load i32, i32* %B, align 4 "
233 " %mul = mul nsw i32 %b, %a "
234 " %iv0.init = getelementptr inbounds i8, i8* %arr, i32 %mul "
235 " br label %loop "
237 "loop: "
238 " %iv0 = phi i8* [ %iv0.inc, %loop ], [ %iv0.init, %loop.ph ] "
239 " %iv1 = phi i32 [ %iv1.inc, %loop ], [ 0, %loop.ph ] "
240 " %conv = trunc i32 %iv1 to i8 "
241 " store i8 %conv, i8* %iv0, align 1 "
242 " %iv0.inc = getelementptr inbounds i8, i8* %iv0, i32 %b "
243 " %iv1.inc = add nuw nsw i32 %iv1, 1 "
244 " %exitcond = icmp eq i32 %iv1.inc, %n "
245 " br i1 %exitcond, label %for.end.loopexit, label %loop "
247 "for.end.loopexit: "
248 " br label %for.end "
250 "for.end: "
251 " ret void "
252 "} "
254 "define void @f_2(i32* %X, i32* %Y, i32* %Z) { "
255 " %x = load i32, i32* %X "
256 " %y = load i32, i32* %Y "
257 " %z = load i32, i32* %Z "
258 " ret void "
259 "} "
261 "define void @f_3() { "
262 " %x = load i32, i32* @var_0"
263 " %y = load i32, i32* @var_1"
264 " %z = load i32, i32* @var_2"
265 " ret void"
266 "} "
268 "define void @f_4(i32 %a, i32 %b, i32 %c) { "
269 " %x = call i32 @unknown(i32 %a, i32 %b, i32 %c)"
270 " %y = call i32 @unknown(i32 %b, i32 %c, i32 %a)"
271 " %z = call i32 @unknown(i32 %c, i32 %a, i32 %b)"
272 " ret void"
273 "} "
275 Err, C);
277 assert(M && "Could not parse module?");
278 assert(!verifyModule(*M) && "Must have been well formed!");
280 runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
281 auto *IV0 = getInstructionByName(F, "iv0");
282 auto *IV0Inc = getInstructionByName(F, "iv0.inc");
284 auto *FirstExprForIV0 = SE.getSCEV(IV0);
285 auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
286 auto *SecondExprForIV0 = SE.getSCEV(IV0);
288 EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
289 EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
290 EXPECT_TRUE(isa<SCEVAddRecExpr>(SecondExprForIV0));
293 auto CheckCommutativeMulExprs = [&](ScalarEvolution &SE, const SCEV *A,
294 const SCEV *B, const SCEV *C) {
295 EXPECT_EQ(SE.getMulExpr(A, B), SE.getMulExpr(B, A));
296 EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
297 EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));
299 SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
300 SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
301 SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
302 SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
303 SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
304 SmallVector<const SCEV *, 3> Ops5 = {C, A, B};
306 auto *Mul0 = SE.getMulExpr(Ops0);
307 auto *Mul1 = SE.getMulExpr(Ops1);
308 auto *Mul2 = SE.getMulExpr(Ops2);
309 auto *Mul3 = SE.getMulExpr(Ops3);
310 auto *Mul4 = SE.getMulExpr(Ops4);
311 auto *Mul5 = SE.getMulExpr(Ops5);
313 EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
314 EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
315 EXPECT_EQ(Mul2, Mul3) << "Expected " << *Mul2 << " == " << *Mul3;
316 EXPECT_EQ(Mul3, Mul4) << "Expected " << *Mul3 << " == " << *Mul4;
317 EXPECT_EQ(Mul4, Mul5) << "Expected " << *Mul4 << " == " << *Mul5;
320 for (StringRef FuncName : {"f_2", "f_3", "f_4"})
321 runWithSE(
322 *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
323 CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
324 SE.getSCEV(getInstructionByName(F, "y")),
325 SE.getSCEV(getInstructionByName(F, "z")));
329 TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
330 FunctionType *FTy =
331 FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
332 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
333 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
334 BasicBlock *LoopBB = BasicBlock::Create(Context, "bb1", F);
335 BranchInst::Create(LoopBB, EntryBB);
337 auto *Ty = Type::getInt32Ty(Context);
338 SmallVector<Instruction*, 8> Muls(8), Acc(8), NextAcc(8);
340 Acc[0] = PHINode::Create(Ty, 2, "", LoopBB);
341 Acc[1] = PHINode::Create(Ty, 2, "", LoopBB);
342 Acc[2] = PHINode::Create(Ty, 2, "", LoopBB);
343 Acc[3] = PHINode::Create(Ty, 2, "", LoopBB);
344 Acc[4] = PHINode::Create(Ty, 2, "", LoopBB);
345 Acc[5] = PHINode::Create(Ty, 2, "", LoopBB);
346 Acc[6] = PHINode::Create(Ty, 2, "", LoopBB);
347 Acc[7] = PHINode::Create(Ty, 2, "", LoopBB);
349 for (int i = 0; i < 20; i++) {
350 Muls[0] = BinaryOperator::CreateMul(Acc[0], Acc[0], "", LoopBB);
351 NextAcc[0] = BinaryOperator::CreateAdd(Muls[0], Acc[4], "", LoopBB);
352 Muls[1] = BinaryOperator::CreateMul(Acc[1], Acc[1], "", LoopBB);
353 NextAcc[1] = BinaryOperator::CreateAdd(Muls[1], Acc[5], "", LoopBB);
354 Muls[2] = BinaryOperator::CreateMul(Acc[2], Acc[2], "", LoopBB);
355 NextAcc[2] = BinaryOperator::CreateAdd(Muls[2], Acc[6], "", LoopBB);
356 Muls[3] = BinaryOperator::CreateMul(Acc[3], Acc[3], "", LoopBB);
357 NextAcc[3] = BinaryOperator::CreateAdd(Muls[3], Acc[7], "", LoopBB);
359 Muls[4] = BinaryOperator::CreateMul(Acc[4], Acc[4], "", LoopBB);
360 NextAcc[4] = BinaryOperator::CreateAdd(Muls[4], Acc[0], "", LoopBB);
361 Muls[5] = BinaryOperator::CreateMul(Acc[5], Acc[5], "", LoopBB);
362 NextAcc[5] = BinaryOperator::CreateAdd(Muls[5], Acc[1], "", LoopBB);
363 Muls[6] = BinaryOperator::CreateMul(Acc[6], Acc[6], "", LoopBB);
364 NextAcc[6] = BinaryOperator::CreateAdd(Muls[6], Acc[2], "", LoopBB);
365 Muls[7] = BinaryOperator::CreateMul(Acc[7], Acc[7], "", LoopBB);
366 NextAcc[7] = BinaryOperator::CreateAdd(Muls[7], Acc[3], "", LoopBB);
367 Acc = NextAcc;
370 auto II = LoopBB->begin();
371 for (int i = 0; i < 8; i++) {
372 PHINode *Phi = cast<PHINode>(&*II++);
373 Phi->addIncoming(Acc[i], LoopBB);
374 Phi->addIncoming(UndefValue::get(Ty), EntryBB);
377 BasicBlock *ExitBB = BasicBlock::Create(Context, "bb2", F);
378 BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
379 LoopBB);
381 Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
382 Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
383 Acc[2] = BinaryOperator::CreateAdd(Acc[4], Acc[5], "", ExitBB);
384 Acc[3] = BinaryOperator::CreateAdd(Acc[6], Acc[7], "", ExitBB);
385 Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
386 Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
387 Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
389 ReturnInst::Create(Context, nullptr, ExitBB);
391 ScalarEvolution SE = buildSE(*F);
393 EXPECT_NE(nullptr, SE.getSCEV(Acc[0]));
396 TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
397 IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(Context);
398 PointerType *IntPtrPtrTy = IntPtrTy->getPointerTo();
400 FunctionType *FTy =
401 FunctionType::get(Type::getVoidTy(Context), {IntPtrTy, IntPtrTy}, false);
402 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
403 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
405 Value *X = &*F->arg_begin();
406 Value *Y = &*std::next(F->arg_begin());
408 const int ValueDepth = 10;
409 for (int i = 0; i < ValueDepth; i++) {
410 X = new LoadInst(IntPtrTy, new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB),
412 /*isVolatile*/ false, EntryBB);
413 Y = new LoadInst(IntPtrTy, new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB),
415 /*isVolatile*/ false, EntryBB);
418 auto *MulA = BinaryOperator::CreateMul(X, Y, "", EntryBB);
419 auto *MulB = BinaryOperator::CreateMul(Y, X, "", EntryBB);
420 ReturnInst::Create(Context, nullptr, EntryBB);
422 // This test isn't checking for correctness. Today making A and B resolve to
423 // the same SCEV would require deeper searching in CompareValueComplexity,
424 // which will slow down compilation. However, this test can fail (with LLVM's
425 // behavior still being correct) if we ever have a smarter
426 // CompareValueComplexity that is both fast and more accurate.
428 ScalarEvolution SE = buildSE(*F);
429 auto *A = SE.getSCEV(MulA);
430 auto *B = SE.getSCEV(MulB);
431 EXPECT_NE(A, B);
434 TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
435 Type *Ty32 = Type::getInt32Ty(Context);
436 Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};
438 FunctionType *FTy =
439 FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
440 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
442 Argument *A1 = &*F->arg_begin();
443 Argument *A2 = &*(std::next(F->arg_begin()));
444 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
446 Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
447 Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
448 Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
449 Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
450 Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
451 // FIXME: The size of this is arbitrary and doesn't seem to change the
452 // result, but SCEV will do quadratic work for these so a large number here
453 // will be extremely slow. We should revisit what and how this is testing
454 // SCEV.
455 for (int i = 0; i < 10; i++) {
456 Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
457 Add1 = Add2;
458 Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
461 ReturnInst::Create(Context, nullptr, EntryBB);
462 ScalarEvolution SE = buildSE(*F);
463 EXPECT_NE(nullptr, SE.getSCEV(Mul1));
466 static Instruction &GetInstByName(Function &F, StringRef Name) {
467 for (auto &I : instructions(F))
468 if (I.getName() == Name)
469 return I;
470 llvm_unreachable("Could not find instructions!");
473 TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
474 LLVMContext C;
475 SMDiagnostic Err;
476 std::unique_ptr<Module> M = parseAssemblyString(
477 "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
479 "@var_0 = external global i32, align 4"
480 "@var_1 = external global i32, align 4"
481 "@var_2 = external global i32, align 4"
483 "declare i32 @unknown(i32, i32, i32)"
485 "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
486 " local_unnamed_addr { "
487 "entry: "
488 " br label %loop.ph "
490 "loop.ph: "
491 " br label %loop "
493 "loop: "
494 " %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
495 " %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
496 " %iv0.inc = add i32 %iv0, 1 "
497 " %iv1.inc = add i32 %iv1, 3 "
498 " br i1 undef, label %for.end.loopexit, label %loop "
500 "for.end.loopexit: "
501 " ret void "
502 "} "
504 "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
505 " local_unnamed_addr { "
506 "entry: "
507 " br label %loop_0 "
509 "loop_0: "
510 " br i1 undef, label %loop_0, label %loop_1 "
512 "loop_1: "
513 " br i1 undef, label %loop_2, label %loop_1 "
516 "loop_2: "
517 " br i1 undef, label %end, label %loop_2 "
519 "end: "
520 " ret void "
521 "} "
523 Err, C);
525 assert(M && "Could not parse module?");
526 assert(!verifyModule(*M) && "Must have been well formed!");
528 runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
529 auto &I0 = GetInstByName(F, "iv0");
530 auto &I1 = *I0.getNextNode();
532 auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
533 PostIncLoopSet Loops;
534 Loops.insert(S0->getLoop());
535 auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
536 auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
537 EXPECT_EQ(S0, D0) << *S0 << " " << *D0;
539 auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
540 Loops.clear();
541 Loops.insert(S1->getLoop());
542 auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
543 auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
544 EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
547 runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
548 auto *L2 = *LI.begin();
549 auto *L1 = *std::next(LI.begin());
550 auto *L0 = *std::next(LI.begin(), 2);
552 auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
553 SmallVector<const SCEV *, 4> OpsCopy(Ops);
554 return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
557 auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
558 SmallVector<const SCEV *, 4> OpsCopy(Ops);
559 return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
562 // We first populate the AddRecs vector with a few "interesting" SCEV
563 // expressions, and then we go through the list and assert that each
564 // expression in it has an invertible normalization.
566 std::vector<const SCEV *> Exprs;
568 const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
569 const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
570 const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
571 const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));
573 Exprs.push_back(GetAddRec(L0, {V0})); // 0
574 Exprs.push_back(GetAddRec(L0, {V0, V1})); // 1
575 Exprs.push_back(GetAddRec(L0, {V0, V1, V2})); // 2
576 Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3
578 Exprs.push_back(
579 GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
580 Exprs.push_back(
581 GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
582 Exprs.push_back(
583 GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6
585 Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7
587 Exprs.push_back(
588 GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8
590 Exprs.push_back(
591 GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
594 std::vector<PostIncLoopSet> LoopSets;
595 for (int i = 0; i < 8; i++) {
596 LoopSets.emplace_back();
597 if (i & 1)
598 LoopSets.back().insert(L0);
599 if (i & 2)
600 LoopSets.back().insert(L1);
601 if (i & 4)
602 LoopSets.back().insert(L2);
605 for (const auto &LoopSet : LoopSets)
606 for (auto *S : Exprs) {
608 auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
609 auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);
611 // Normalization and then denormalizing better give us back the same
612 // value.
613 EXPECT_EQ(S, D) << "S = " << *S << " D = " << *D << " N = " << *N;
616 auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
617 auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);
619 // Denormalization and then normalizing better give us back the same
620 // value.
621 EXPECT_EQ(S, N) << "S = " << *S << " N = " << *N;
627 // Expect the call of getZeroExtendExpr will not cost exponential time.
628 TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
629 LLVMContext C;
630 SMDiagnostic Err;
632 // Generate a function like below:
633 // define void @foo() {
634 // entry:
635 // br label %for.cond
637 // for.cond:
638 // %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ]
639 // %cmp = icmp sgt i64 %0, 90
640 // br i1 %cmp, label %for.inc, label %for.cond1
642 // for.inc:
643 // %dec = add nsw i64 %0, -1
644 // br label %for.cond
646 // for.cond1:
647 // %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ]
648 // %cmp3 = icmp sgt i64 %1, 90
649 // br i1 %cmp3, label %for.inc2, label %for.cond4
651 // for.inc2:
652 // %dec5 = add nsw i64 %1, -1
653 // br label %for.cond1
655 // ......
657 // for.cond89:
658 // %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ]
659 // %cmp93 = icmp sgt i64 %19, 90
660 // br i1 %cmp93, label %for.inc92, label %for.end
662 // for.inc92:
663 // %dec94 = add nsw i64 %19, -1
664 // br label %for.cond89
666 // for.end:
667 // %gep = getelementptr i8, i8* null, i64 %dec
668 // %gep6 = getelementptr i8, i8* %gep, i64 %dec5
669 // ......
670 // %gep95 = getelementptr i8, i8* %gep91, i64 %dec94
671 // ret void
672 // }
673 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
674 Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", M);
676 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
677 BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F);
678 BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F);
679 BranchInst::Create(CondBB, EntryBB);
680 BasicBlock *PrevBB = EntryBB;
682 Type *I64Ty = Type::getInt64Ty(Context);
683 Type *I8Ty = Type::getInt8Ty(Context);
684 Type *I8PtrTy = Type::getInt8PtrTy(Context);
685 Value *Accum = Constant::getNullValue(I8PtrTy);
686 int Iters = 20;
687 for (int i = 0; i < Iters; i++) {
688 BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB);
689 auto *PN = PHINode::Create(I64Ty, 2, "", CondBB);
690 PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB);
691 auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN,
692 ConstantInt::get(Context, APInt(64, 90)), "cmp",
693 CondBB);
694 BasicBlock *NextBB;
695 if (i != Iters - 1)
696 NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB);
697 else
698 NextBB = EndBB;
699 BranchInst::Create(IncBB, NextBB, Cmp, CondBB);
700 auto *Dec = BinaryOperator::CreateNSWAdd(
701 PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB);
702 PN->addIncoming(Dec, IncBB);
703 BranchInst::Create(CondBB, IncBB);
705 Accum = GetElementPtrInst::Create(I8Ty, Accum, PN, "gep", EndBB);
707 PrevBB = CondBB;
708 CondBB = NextBB;
710 ReturnInst::Create(Context, nullptr, EndBB);
711 ScalarEvolution SE = buildSE(*F);
712 const SCEV *S = SE.getSCEV(Accum);
713 Type *I128Ty = Type::getInt128Ty(Context);
714 SE.getZeroExtendExpr(S, I128Ty);
717 // Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
718 TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
720 * Create the following code:
721 * func(i64 addrspace(10)* %arg)
722 * top:
723 * br label %L.ph
724 * L.ph:
725 * br label %L
726 * L:
727 * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
728 * %add = add i64 %phi2, 1
729 * br i1 undef, label %post, label %L2
730 * post:
731 * %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
732 * #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
733 * ret void
735 * We will create the appropriate SCEV expression for %gep and expand it,
736 * then check that no inttoptr/ptrtoint instructions got inserted.
739 // Create a module with non-integral pointers in it's datalayout
740 Module NIM("nonintegral", Context);
741 std::string DataLayout = M.getDataLayoutStr();
742 if (!DataLayout.empty())
743 DataLayout += "-";
744 DataLayout += "ni:10";
745 NIM.setDataLayout(DataLayout);
747 Type *T_int1 = Type::getInt1Ty(Context);
748 Type *T_int64 = Type::getInt64Ty(Context);
749 Type *T_pint64 = T_int64->getPointerTo(10);
751 FunctionType *FTy =
752 FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
753 Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
755 Argument *Arg = &*F->arg_begin();
757 BasicBlock *Top = BasicBlock::Create(Context, "top", F);
758 BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
759 BasicBlock *L = BasicBlock::Create(Context, "L", F);
760 BasicBlock *Post = BasicBlock::Create(Context, "post", F);
762 IRBuilder<> Builder(Top);
763 Builder.CreateBr(LPh);
765 Builder.SetInsertPoint(LPh);
766 Builder.CreateBr(L);
768 Builder.SetInsertPoint(L);
769 PHINode *Phi = Builder.CreatePHI(T_int64, 2);
770 Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
771 Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
772 Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
773 Phi->addIncoming(Add, L);
775 Builder.SetInsertPoint(Post);
776 Value *GepBase =
777 Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
778 Instruction *Ret = Builder.CreateRetVoid();
780 ScalarEvolution SE = buildSE(*F);
781 auto *AddRec =
782 SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
783 LI->getLoopFor(L), SCEV::FlagNUW);
785 SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
786 Exp.disableCanonicalMode();
787 Exp.expandCodeFor(AddRec, T_pint64, Ret);
789 // Make sure none of the instructions inserted were inttoptr/ptrtoint.
790 // The verifier will check this.
791 EXPECT_FALSE(verifyFunction(*F, &errs()));
794 // Make sure that SCEV invalidates exit limits after invalidating the values it
795 // depends on when we forget a loop.
796 TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
798 * Create the following code:
799 * func(i64 addrspace(10)* %arg)
800 * top:
801 * br label %L.ph
802 * L.ph:
803 * br label %L
804 * L:
805 * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
806 * %add = add i64 %phi2, 1
807 * %cond = icmp slt i64 %add, 1000; then becomes 2000.
808 * br i1 %cond, label %post, label %L2
809 * post:
810 * ret void
814 // Create a module with non-integral pointers in it's datalayout
815 Module NIM("nonintegral", Context);
816 std::string DataLayout = M.getDataLayoutStr();
817 if (!DataLayout.empty())
818 DataLayout += "-";
819 DataLayout += "ni:10";
820 NIM.setDataLayout(DataLayout);
822 Type *T_int64 = Type::getInt64Ty(Context);
823 Type *T_pint64 = T_int64->getPointerTo(10);
825 FunctionType *FTy =
826 FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
827 Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
829 BasicBlock *Top = BasicBlock::Create(Context, "top", F);
830 BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
831 BasicBlock *L = BasicBlock::Create(Context, "L", F);
832 BasicBlock *Post = BasicBlock::Create(Context, "post", F);
834 IRBuilder<> Builder(Top);
835 Builder.CreateBr(LPh);
837 Builder.SetInsertPoint(LPh);
838 Builder.CreateBr(L);
840 Builder.SetInsertPoint(L);
841 PHINode *Phi = Builder.CreatePHI(T_int64, 2);
842 auto *Add = cast<Instruction>(
843 Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
844 auto *Limit = ConstantInt::get(T_int64, 1000);
845 auto *Cond = cast<Instruction>(
846 Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
847 auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
848 Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
849 Phi->addIncoming(Add, L);
851 Builder.SetInsertPoint(Post);
852 Builder.CreateRetVoid();
854 ScalarEvolution SE = buildSE(*F);
855 auto *Loop = LI->getLoopFor(L);
856 const SCEV *EC = SE.getBackedgeTakenCount(Loop);
857 EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
858 EXPECT_TRUE(isa<SCEVConstant>(EC));
859 EXPECT_EQ(cast<SCEVConstant>(EC)->getAPInt().getLimitedValue(), 999u);
861 // The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
862 // that is relevant to this test.
863 auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
864 auto *AR =
865 SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
866 const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
867 EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
868 EXPECT_TRUE(isa<SCEVConstant>(ARAtLoopExit));
869 EXPECT_EQ(cast<SCEVConstant>(ARAtLoopExit)->getAPInt().getLimitedValue(),
870 1004u);
872 SE.forgetLoop(Loop);
873 Br->eraseFromParent();
874 Cond->eraseFromParent();
876 Builder.SetInsertPoint(L);
877 auto *NewCond = Builder.CreateICmp(
878 ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
879 Builder.CreateCondBr(NewCond, L, Post);
880 const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
881 EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
882 EXPECT_TRUE(isa<SCEVConstant>(NewEC));
883 EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
884 const SCEV *NewARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
885 EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewARAtLoopExit));
886 EXPECT_TRUE(isa<SCEVConstant>(NewARAtLoopExit));
887 EXPECT_EQ(cast<SCEVConstant>(NewARAtLoopExit)->getAPInt().getLimitedValue(),
888 2004u);
891 // Make sure that SCEV invalidates exit limits after invalidating the values it
892 // depends on when we forget a value.
893 TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
895 * Create the following code:
896 * func(i64 addrspace(10)* %arg)
897 * top:
898 * br label %L.ph
899 * L.ph:
900 * %load = load i64 addrspace(10)* %arg
901 * br label %L
902 * L:
903 * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
904 * %add = add i64 %phi2, 1
905 * %cond = icmp slt i64 %add, %load ; then becomes 2000.
906 * br i1 %cond, label %post, label %L2
907 * post:
908 * ret void
912 // Create a module with non-integral pointers in it's datalayout
913 Module NIM("nonintegral", Context);
914 std::string DataLayout = M.getDataLayoutStr();
915 if (!DataLayout.empty())
916 DataLayout += "-";
917 DataLayout += "ni:10";
918 NIM.setDataLayout(DataLayout);
920 Type *T_int64 = Type::getInt64Ty(Context);
921 Type *T_pint64 = T_int64->getPointerTo(10);
923 FunctionType *FTy =
924 FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
925 Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
927 Argument *Arg = &*F->arg_begin();
929 BasicBlock *Top = BasicBlock::Create(Context, "top", F);
930 BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
931 BasicBlock *L = BasicBlock::Create(Context, "L", F);
932 BasicBlock *Post = BasicBlock::Create(Context, "post", F);
934 IRBuilder<> Builder(Top);
935 Builder.CreateBr(LPh);
937 Builder.SetInsertPoint(LPh);
938 auto *Load = cast<Instruction>(Builder.CreateLoad(T_int64, Arg, "load"));
939 Builder.CreateBr(L);
941 Builder.SetInsertPoint(L);
942 PHINode *Phi = Builder.CreatePHI(T_int64, 2);
943 auto *Add = cast<Instruction>(
944 Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
945 auto *Cond = cast<Instruction>(
946 Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Load, "cond"));
947 auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
948 Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
949 Phi->addIncoming(Add, L);
951 Builder.SetInsertPoint(Post);
952 Builder.CreateRetVoid();
954 ScalarEvolution SE = buildSE(*F);
955 auto *Loop = LI->getLoopFor(L);
956 const SCEV *EC = SE.getBackedgeTakenCount(Loop);
957 EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
958 EXPECT_FALSE(isa<SCEVConstant>(EC));
960 SE.forgetValue(Load);
961 Br->eraseFromParent();
962 Cond->eraseFromParent();
963 Load->eraseFromParent();
965 Builder.SetInsertPoint(L);
966 auto *NewCond = Builder.CreateICmp(
967 ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
968 Builder.CreateCondBr(NewCond, L, Post);
969 const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
970 EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
971 EXPECT_TRUE(isa<SCEVConstant>(NewEC));
972 EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
975 TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstants) {
976 // Reference: https://reviews.llvm.org/D37265
977 // Make sure that SCEV does not blow up when constructing an AddRec
978 // with predicates for a phi with the update pattern:
979 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
980 // when either the initial value of the Phi or the InvariantAccum are
981 // constants that are too large to fit in an ix but are zero when truncated to
982 // ix.
983 FunctionType *FTy =
984 FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
985 Function *F =
986 Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);
989 Create IR:
990 entry:
991 br label %loop
992 loop:
993 %0 = phi i64 [-9223372036854775808, %entry], [%3, %loop]
994 %1 = shl i64 %0, 32
995 %2 = ashr exact i64 %1, 32
996 %3 = add i64 %2, -9223372036854775808
997 br i1 undef, label %exit, label %loop
998 exit:
999 ret void
1001 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
1002 BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
1003 BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
1005 // entry:
1006 BranchInst::Create(LoopBB, EntryBB);
1007 // loop:
1008 auto *MinInt64 =
1009 ConstantInt::get(Context, APInt(64, 0x8000000000000000U, true));
1010 auto *Int64_32 = ConstantInt::get(Context, APInt(64, 32));
1011 auto *Br = BranchInst::Create(
1012 LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
1013 auto *Phi = PHINode::Create(Type::getInt64Ty(Context), 2, "", Br);
1014 auto *Shl = BinaryOperator::CreateShl(Phi, Int64_32, "", Br);
1015 auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int64_32, "", Br);
1016 auto *Add = BinaryOperator::CreateAdd(AShr, MinInt64, "", Br);
1017 Phi->addIncoming(MinInt64, EntryBB);
1018 Phi->addIncoming(Add, LoopBB);
1019 // exit:
1020 ReturnInst::Create(Context, nullptr, ExitBB);
1022 // Make sure that SCEV doesn't blow up
1023 ScalarEvolution SE = buildSE(*F);
1024 SCEVUnionPredicate Preds;
1025 const SCEV *Expr = SE.getSCEV(Phi);
1026 EXPECT_NE(nullptr, Expr);
1027 EXPECT_TRUE(isa<SCEVUnknown>(Expr));
1028 auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
1031 TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstantAccum) {
1032 // Make sure that SCEV does not blow up when constructing an AddRec
1033 // with predicates for a phi with the update pattern:
1034 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
1035 // when the InvariantAccum is a constant that is too large to fit in an
1036 // ix but are zero when truncated to ix, and the initial value of the
1037 // phi is not a constant.
1038 Type *Int32Ty = Type::getInt32Ty(Context);
1039 SmallVector<Type *, 1> Types;
1040 Types.push_back(Int32Ty);
1041 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
1042 Function *F =
1043 Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);
1046 Create IR:
1047 define @addrecphitest(i32)
1048 entry:
1049 br label %loop
1050 loop:
1051 %1 = phi i32 [%0, %entry], [%4, %loop]
1052 %2 = shl i32 %1, 16
1053 %3 = ashr exact i32 %2, 16
1054 %4 = add i32 %3, -2147483648
1055 br i1 undef, label %exit, label %loop
1056 exit:
1057 ret void
1059 BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
1060 BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
1061 BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
1063 // entry:
1064 BranchInst::Create(LoopBB, EntryBB);
1065 // loop:
1066 auto *MinInt32 = ConstantInt::get(Context, APInt(32, 0x80000000U, true));
1067 auto *Int32_16 = ConstantInt::get(Context, APInt(32, 16));
1068 auto *Br = BranchInst::Create(
1069 LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
1070 auto *Phi = PHINode::Create(Int32Ty, 2, "", Br);
1071 auto *Shl = BinaryOperator::CreateShl(Phi, Int32_16, "", Br);
1072 auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int32_16, "", Br);
1073 auto *Add = BinaryOperator::CreateAdd(AShr, MinInt32, "", Br);
1074 auto *Arg = &*(F->arg_begin());
1075 Phi->addIncoming(Arg, EntryBB);
1076 Phi->addIncoming(Add, LoopBB);
1077 // exit:
1078 ReturnInst::Create(Context, nullptr, ExitBB);
1080 // Make sure that SCEV doesn't blow up
1081 ScalarEvolution SE = buildSE(*F);
1082 SCEVUnionPredicate Preds;
1083 const SCEV *Expr = SE.getSCEV(Phi);
1084 EXPECT_NE(nullptr, Expr);
1085 EXPECT_TRUE(isa<SCEVUnknown>(Expr));
1086 auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
1089 TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
1090 // Verify that the following SCEV gets folded to a zero:
1091 // (-1 * (trunc i64 (-1 * %0) to i32)) + (-1 * (trunc i64 %0 to i32)
1092 Type *ArgTy = Type::getInt64Ty(Context);
1093 Type *Int32Ty = Type::getInt32Ty(Context);
1094 SmallVector<Type *, 1> Types;
1095 Types.push_back(ArgTy);
1096 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
1097 Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
1098 BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
1099 ReturnInst::Create(Context, nullptr, BB);
1101 ScalarEvolution SE = buildSE(*F);
1103 auto *Arg = &*(F->arg_begin());
1104 const auto *ArgSCEV = SE.getSCEV(Arg);
1106 // Build the SCEV
1107 const auto *A0 = SE.getNegativeSCEV(ArgSCEV);
1108 const auto *A1 = SE.getTruncateExpr(A0, Int32Ty);
1109 const auto *A = SE.getNegativeSCEV(A1);
1111 const auto *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
1112 const auto *B = SE.getNegativeSCEV(B0);
1114 const auto *Expr = SE.getAddExpr(A, B);
1115 // Verify that the SCEV was folded to 0
1116 const auto *ZeroConst = SE.getConstant(Int32Ty, 0);
1117 EXPECT_EQ(Expr, ZeroConst);
1120 // Check that we can correctly identify the points at which the SCEV of the
1121 // AddRec can be expanded.
1122 TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
1124 * Create the following code:
1125 * func(i64 addrspace(10)* %arg)
1126 * top:
1127 * br label %L.ph
1128 * L.ph:
1129 * br label %L
1130 * L:
1131 * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
1132 * %add = add i64 %phi2, 1
1133 * %cond = icmp slt i64 %add, 1000; then becomes 2000.
1134 * br i1 %cond, label %post, label %L2
1135 * post:
1136 * ret void
1140 // Create a module with non-integral pointers in it's datalayout
1141 Module NIM("nonintegral", Context);
1142 std::string DataLayout = M.getDataLayoutStr();
1143 if (!DataLayout.empty())
1144 DataLayout += "-";
1145 DataLayout += "ni:10";
1146 NIM.setDataLayout(DataLayout);
1148 Type *T_int64 = Type::getInt64Ty(Context);
1149 Type *T_pint64 = T_int64->getPointerTo(10);
1151 FunctionType *FTy =
1152 FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
1153 Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
1155 BasicBlock *Top = BasicBlock::Create(Context, "top", F);
1156 BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
1157 BasicBlock *L = BasicBlock::Create(Context, "L", F);
1158 BasicBlock *Post = BasicBlock::Create(Context, "post", F);
1160 IRBuilder<> Builder(Top);
1161 Builder.CreateBr(LPh);
1163 Builder.SetInsertPoint(LPh);
1164 Builder.CreateBr(L);
1166 Builder.SetInsertPoint(L);
1167 PHINode *Phi = Builder.CreatePHI(T_int64, 2);
1168 auto *Add = cast<Instruction>(
1169 Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
1170 auto *Limit = ConstantInt::get(T_int64, 1000);
1171 auto *Cond = cast<Instruction>(
1172 Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
1173 Builder.CreateCondBr(Cond, L, Post);
1174 Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
1175 Phi->addIncoming(Add, L);
1177 Builder.SetInsertPoint(Post);
1178 Builder.CreateRetVoid();
1180 ScalarEvolution SE = buildSE(*F);
1181 const SCEV *S = SE.getSCEV(Phi);
1182 EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
1183 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
1184 EXPECT_TRUE(AR->isAffine());
1185 EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
1186 EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
1187 EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
1188 EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
1191 // Check that SCEV expander does not use the nuw instruction
1192 // for expansion.
1193 TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
1195 * Create the following code:
1196 * func(i64 %a)
1197 * entry:
1198 * br false, label %exit, label %body
1199 * body:
1200 * %s1 = add i64 %a, -1
1201 * br label %exit
1202 * exit:
1203 * %s = add nuw i64 %a, -1
1204 * ret %s
1207 // Create a module.
1208 Module M("SCEVExpanderNUW", Context);
1210 Type *T_int64 = Type::getInt64Ty(Context);
1212 FunctionType *FTy =
1213 FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
1214 Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
1215 Argument *Arg = &*F->arg_begin();
1216 ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
1218 BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
1219 BasicBlock *Body = BasicBlock::Create(Context, "body", F);
1220 BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
1222 IRBuilder<> Builder(Entry);
1223 ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
1224 Builder.CreateCondBr(Cond, Exit, Body);
1226 Builder.SetInsertPoint(Body);
1227 auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1228 Builder.CreateBr(Exit);
1230 Builder.SetInsertPoint(Exit);
1231 auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1232 S2->setHasNoUnsignedWrap(true);
1233 auto *R = cast<Instruction>(Builder.CreateRetVoid());
1235 ScalarEvolution SE = buildSE(*F);
1236 const SCEV *S = SE.getSCEV(S1);
1237 EXPECT_TRUE(isa<SCEVAddExpr>(S));
1238 SCEVExpander Exp(SE, M.getDataLayout(), "expander");
1239 auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
1240 EXPECT_FALSE(I->hasNoUnsignedWrap());
1243 // Check that SCEV expander does not use the nsw instruction
1244 // for expansion.
1245 TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
1247 * Create the following code:
1248 * func(i64 %a)
1249 * entry:
1250 * br false, label %exit, label %body
1251 * body:
1252 * %s1 = add i64 %a, -1
1253 * br label %exit
1254 * exit:
1255 * %s = add nsw i64 %a, -1
1256 * ret %s
1259 // Create a module.
1260 Module M("SCEVExpanderNSW", Context);
1262 Type *T_int64 = Type::getInt64Ty(Context);
1264 FunctionType *FTy =
1265 FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
1266 Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
1267 Argument *Arg = &*F->arg_begin();
1268 ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
1270 BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
1271 BasicBlock *Body = BasicBlock::Create(Context, "body", F);
1272 BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
1274 IRBuilder<> Builder(Entry);
1275 ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
1276 Builder.CreateCondBr(Cond, Exit, Body);
1278 Builder.SetInsertPoint(Body);
1279 auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1280 Builder.CreateBr(Exit);
1282 Builder.SetInsertPoint(Exit);
1283 auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1284 S2->setHasNoSignedWrap(true);
1285 auto *R = cast<Instruction>(Builder.CreateRetVoid());
1287 ScalarEvolution SE = buildSE(*F);
1288 const SCEV *S = SE.getSCEV(S1);
1289 EXPECT_TRUE(isa<SCEVAddExpr>(S));
1290 SCEVExpander Exp(SE, M.getDataLayout(), "expander");
1291 auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
1292 EXPECT_FALSE(I->hasNoSignedWrap());
1295 // Check that SCEV does not save the SCEV -> V
1296 // mapping of SCEV differ from V in NUW flag.
1297 TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
1299 * Create the following code:
1300 * func(i64 %a)
1301 * entry:
1302 * %s1 = add i64 %a, -1
1303 * %s2 = add nuw i64 %a, -1
1304 * br label %exit
1305 * exit:
1306 * ret %s
1309 // Create a module.
1310 Module M("SCEVCacheNUW", Context);
1312 Type *T_int64 = Type::getInt64Ty(Context);
1314 FunctionType *FTy =
1315 FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
1316 Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
1317 Argument *Arg = &*F->arg_begin();
1318 ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
1320 BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
1321 BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
1323 IRBuilder<> Builder(Entry);
1324 auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1325 auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1326 S2->setHasNoUnsignedWrap(true);
1327 Builder.CreateBr(Exit);
1329 Builder.SetInsertPoint(Exit);
1330 auto *R = cast<Instruction>(Builder.CreateRetVoid());
1332 ScalarEvolution SE = buildSE(*F);
1333 // Get S2 first to move it to cache.
1334 const SCEV *SC2 = SE.getSCEV(S2);
1335 EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
1336 // Now get S1.
1337 const SCEV *SC1 = SE.getSCEV(S1);
1338 EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
1339 // Expand for S1, it should use S1 not S2 in spite S2
1340 // first in the cache.
1341 SCEVExpander Exp(SE, M.getDataLayout(), "expander");
1342 auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
1343 EXPECT_FALSE(I->hasNoUnsignedWrap());
1346 // Check that SCEV does not save the SCEV -> V
1347 // mapping of SCEV differ from V in NSW flag.
1348 TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
1350 * Create the following code:
1351 * func(i64 %a)
1352 * entry:
1353 * %s1 = add i64 %a, -1
1354 * %s2 = add nsw i64 %a, -1
1355 * br label %exit
1356 * exit:
1357 * ret %s
1360 // Create a module.
1361 Module M("SCEVCacheNUW", Context);
1363 Type *T_int64 = Type::getInt64Ty(Context);
1365 FunctionType *FTy =
1366 FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
1367 Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
1368 Argument *Arg = &*F->arg_begin();
1369 ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
1371 BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
1372 BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
1374 IRBuilder<> Builder(Entry);
1375 auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1376 auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
1377 S2->setHasNoSignedWrap(true);
1378 Builder.CreateBr(Exit);
1380 Builder.SetInsertPoint(Exit);
1381 auto *R = cast<Instruction>(Builder.CreateRetVoid());
1383 ScalarEvolution SE = buildSE(*F);
1384 // Get S2 first to move it to cache.
1385 const SCEV *SC2 = SE.getSCEV(S2);
1386 EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
1387 // Now get S1.
1388 const SCEV *SC1 = SE.getSCEV(S1);
1389 EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
1390 // Expand for S1, it should use S1 not S2 in spite S2
1391 // first in the cache.
1392 SCEVExpander Exp(SE, M.getDataLayout(), "expander");
1393 auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
1394 EXPECT_FALSE(I->hasNoSignedWrap());
1397 // Check logic of SCEV expression size computation.
1398 TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
1400 * Create the following code:
1401 * void func(i64 %a, i64 %b)
1402 * entry:
1403 * %s1 = add i64 %a, 1
1404 * %s2 = udiv i64 %s1, %b
1405 * br label %exit
1406 * exit:
1407 * ret
1410 // Create a module.
1411 Module M("SCEVComputeExpressionSize", Context);
1413 Type *T_int64 = Type::getInt64Ty(Context);
1415 FunctionType *FTy =
1416 FunctionType::get(Type::getVoidTy(Context), { T_int64, T_int64 }, false);
1417 Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
1418 Argument *A = &*F->arg_begin();
1419 Argument *B = &*std::next(F->arg_begin());
1420 ConstantInt *C = ConstantInt::get(Context, APInt(64, 1));
1422 BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
1423 BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
1425 IRBuilder<> Builder(Entry);
1426 auto *S1 = cast<Instruction>(Builder.CreateAdd(A, C, "s1"));
1427 auto *S2 = cast<Instruction>(Builder.CreateUDiv(S1, B, "s2"));
1428 Builder.CreateBr(Exit);
1430 Builder.SetInsertPoint(Exit);
1431 Builder.CreateRetVoid();
1433 ScalarEvolution SE = buildSE(*F);
1434 // Get S2 first to move it to cache.
1435 const SCEV *AS = SE.getSCEV(A);
1436 const SCEV *BS = SE.getSCEV(B);
1437 const SCEV *CS = SE.getSCEV(C);
1438 const SCEV *S1S = SE.getSCEV(S1);
1439 const SCEV *S2S = SE.getSCEV(S2);
1440 EXPECT_EQ(AS->getExpressionSize(), 1u);
1441 EXPECT_EQ(BS->getExpressionSize(), 1u);
1442 EXPECT_EQ(CS->getExpressionSize(), 1u);
1443 EXPECT_EQ(S1S->getExpressionSize(), 3u);
1444 EXPECT_EQ(S2S->getExpressionSize(), 5u);
1447 TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
1448 LLVMContext C;
1449 SMDiagnostic Err;
1451 // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
1452 // SCEVExpander will insert one.
1453 auto TestNoCanonicalIV = [&](
1454 std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
1455 std::unique_ptr<Module> M =
1456 parseAssemblyString("define i32 @test(i32 %limit) { "
1457 "entry: "
1458 " br label %loop "
1459 "loop: "
1460 " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
1461 " %i.inc = add nsw i32 %i, 1 "
1462 " %cont = icmp slt i32 %i.inc, %limit "
1463 " br i1 %cont, label %loop, label %exit "
1464 "exit: "
1465 " ret i32 %i.inc "
1466 "}",
1467 Err, C);
1469 assert(M && "Could not parse module?");
1470 assert(!verifyModule(*M) && "Must have been well formed!");
1472 runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
1473 auto &I = GetInstByName(F, "i");
1474 auto *Loop = LI.getLoopFor(I.getParent());
1475 EXPECT_FALSE(Loop->getCanonicalInductionVariable());
1477 auto *AR = GetAddRec(SE, Loop);
1478 unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
1480 SCEVExpander Exp(SE, M->getDataLayout(), "expander");
1481 auto *InsertAt = I.getNextNode();
1482 Exp.expandCodeFor(AR, nullptr, InsertAt);
1483 PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
1484 unsigned CanonicalIVBitWidth =
1485 cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
1486 EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
1490 // Expand the addrec produced by GetAddRec into a loop with a canonical IV
1491 // which is narrower than addrec type.
1492 // SCEVExpander will insert a canonical IV of a wider type to expand the
1493 // addrec.
1494 auto TestNarrowCanonicalIV = [&](
1495 std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
1496 std::unique_ptr<Module> M = parseAssemblyString(
1497 "define i32 @test(i32 %limit) { "
1498 "entry: "
1499 " br label %loop "
1500 "loop: "
1501 " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
1502 " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
1503 " %i.inc = add nsw i32 %i, 1 "
1504 " %canonical.iv.inc = add i8 %canonical.iv, 1 "
1505 " %cont = icmp slt i32 %i.inc, %limit "
1506 " br i1 %cont, label %loop, label %exit "
1507 "exit: "
1508 " ret i32 %i.inc "
1509 "}",
1510 Err, C);
1512 assert(M && "Could not parse module?");
1513 assert(!verifyModule(*M) && "Must have been well formed!");
1515 runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
1516 auto &I = GetInstByName(F, "i");
1518 auto *LoopHeaderBB = I.getParent();
1519 auto *Loop = LI.getLoopFor(LoopHeaderBB);
1520 PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
1521 EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
1523 auto *AR = GetAddRec(SE, Loop);
1525 unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
1526 unsigned CanonicalIVBitWidth =
1527 cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
1528 EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
1530 SCEVExpander Exp(SE, M->getDataLayout(), "expander");
1531 auto *InsertAt = I.getNextNode();
1532 Exp.expandCodeFor(AR, nullptr, InsertAt);
1534 // Loop over all of the PHI nodes, looking for the new canonical indvar.
1535 PHINode *NewCanonicalIV = nullptr;
1536 for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
1537 ++i) {
1538 PHINode *PN = cast<PHINode>(i);
1539 if (PN == &I || PN == CanonicalIV)
1540 continue;
1541 // We expect that the only PHI added is the new canonical IV
1542 EXPECT_FALSE(NewCanonicalIV);
1543 NewCanonicalIV = PN;
1546 // Check that NewCanonicalIV is a canonical IV, i.e {0,+,1}
1547 BasicBlock *Incoming = nullptr, *Backedge = nullptr;
1548 EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
1549 auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
1550 EXPECT_TRUE(isa<ConstantInt>(Start));
1551 EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
1552 auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
1553 EXPECT_TRUE(isa<BinaryOperator>(Next));
1554 auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
1555 EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
1556 EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
1557 auto *Step = NextBinOp->getOperand(1);
1558 EXPECT_TRUE(isa<ConstantInt>(Step));
1559 EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());
1561 unsigned NewCanonicalIVBitWidth =
1562 cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
1563 EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
1567 // Expand the addrec produced by GetAddRec into a loop with a canonical IV
1568 // of addrec width.
1569 // To expand the addrec SCEVExpander should use the existing canonical IV.
1570 auto TestMatchingCanonicalIV = [&](
1571 std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
1572 unsigned ARBitWidth) {
1573 auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
1574 std::unique_ptr<Module> M = parseAssemblyString(
1575 "define i32 @test(i32 %limit) { "
1576 "entry: "
1577 " br label %loop "
1578 "loop: "
1579 " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
1580 " %canonical.iv = phi " + ARBitWidthTypeStr +
1581 " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
1582 " %i.inc = add nsw i32 %i, 1 "
1583 " %canonical.iv.inc = add " + ARBitWidthTypeStr +
1584 " %canonical.iv, 1 "
1585 " %cont = icmp slt i32 %i.inc, %limit "
1586 " br i1 %cont, label %loop, label %exit "
1587 "exit: "
1588 " ret i32 %i.inc "
1589 "}",
1590 Err, C);
1592 assert(M && "Could not parse module?");
1593 assert(!verifyModule(*M) && "Must have been well formed!");
1595 runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
1596 auto &I = GetInstByName(F, "i");
1597 auto &CanonicalIV = GetInstByName(F, "canonical.iv");
1599 auto *LoopHeaderBB = I.getParent();
1600 auto *Loop = LI.getLoopFor(LoopHeaderBB);
1601 EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
1602 unsigned CanonicalIVBitWidth =
1603 cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
1605 auto *AR = GetAddRec(SE, Loop);
1606 EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
1607 EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
1609 SCEVExpander Exp(SE, M->getDataLayout(), "expander");
1610 auto *InsertAt = I.getNextNode();
1611 Exp.expandCodeFor(AR, nullptr, InsertAt);
1613 // Loop over all of the PHI nodes, looking if a new canonical indvar was
1614 // introduced.
1615 PHINode *NewCanonicalIV = nullptr;
1616 for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
1617 ++i) {
1618 PHINode *PN = cast<PHINode>(i);
1619 if (PN == &I || PN == &CanonicalIV)
1620 continue;
1621 NewCanonicalIV = PN;
1623 EXPECT_FALSE(NewCanonicalIV);
1627 unsigned ARBitWidth = 16;
1628 Type *ARType = IntegerType::get(C, ARBitWidth);
1630 // Expand {5,+,1}
1631 auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
1632 return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
1633 SE.getOne(ARType), L, SCEV::FlagAnyWrap);
1635 TestNoCanonicalIV(GetAR2);
1636 TestNarrowCanonicalIV(GetAR2);
1637 TestMatchingCanonicalIV(GetAR2, ARBitWidth);
1640 TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {
1642 auto checkOneCase = [this](std::string &&str) {
1643 LLVMContext C;
1644 SMDiagnostic Err;
1645 std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);
1647 assert(M && "Could not parse module?");
1648 assert(!verifyModule(*M) && "Must have been well formed!");
1650 Function *F = M->getFunction("f");
1651 ASSERT_NE(F, nullptr) << "Could not find function 'f'";
1653 BasicBlock &Entry = F->getEntryBlock();
1654 LoadInst *Load = cast<LoadInst>(&Entry.front());
1655 BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());
1657 ScalarEvolution SE = buildSE(*F);
1658 const SCEV *AndSCEV = SE.getSCEV(And);
1659 EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
1660 EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());
1662 SCEVExpander Exp(SE, M->getDataLayout(), "expander");
1663 auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
1664 EXPECT_EQ(I->getOpcode(), Instruction::Shl);
1665 EXPECT_FALSE(I->hasNoSignedWrap());
1668 checkOneCase("define void @f(i16* %arrayidx) { "
1669 " %1 = load i16, i16* %arrayidx "
1670 " %2 = and i16 %1, -32768 "
1671 " ret void "
1672 "} ");
1674 checkOneCase("define void @f(i8* %arrayidx) { "
1675 " %1 = load i8, i8* %arrayidx "
1676 " %2 = and i8 %1, -128 "
1677 " ret void "
1678 "} ");
1681 } // end anonymous namespace
1682 } // end namespace llvm