//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
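/// For example, a same-width pointer-to-integer conversion uses a single
/// ptrtoint (folded to a constant expression when V is constant), and a
/// bitcast of a value to its own type returns the value unchanged.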
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
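/// For example, if an identical binop on the same operands already exists
/// within a few instructions of the insertion point, it is reused rather
/// than a duplicate being emitted.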
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS, "tmp"));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
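/// For example, with Factor = 4: a constant S = 10 becomes S = 2 with 2
/// added to Remainder, a multiply S = (8 * %x) becomes (2 * %x), and an
/// addrec S = {8,+,12} becomes {2,+,3}; an unanalyzable S such as %x is
/// left unchanged and false is returned.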
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                            FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
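/// For example, given the operands [%a, 3, %b, {0,+,1}], the non-addrec
/// part [%a, 3, %b] is handed to SE.getAddExpr to be sorted and folded,
/// and the {0,+,1} addrec is re-appended at the end.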
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                const Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         const Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
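///
/// For example, expanding (%p + 4*%i) where %p has type i32* emits a
/// getelementptr of %p with index %i, rather than casting %p to an
/// integer, adding the scaled offset, and casting the result back to
/// a pointer.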
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    const PointerType *PTy,
                                    const Type *Ty,
                                    Value *V) {
  const Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            const Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
        Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices.begin(),
                                 GepIndices.end(),
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *F) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
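/// For example, given an inner loop nested inside an outer loop, the inner
/// loop is chosen; given two sibling loops, the one whose header is
/// dominated by the other's header is chosen.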
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
  return 0;
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (isNonConstantNegative(LHS.second)) {
      if (!isNonConstantNegative(RHS.second))
        return false;
    } else if (isNonConstantNegative(RHS.second))
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
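  // For example, for (%p + %i + 4) the pointer operand %p is taken first,
  // so it can seed the GEP formation below, and the constant 4 is added
  // last, all else equal.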
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (isNonConstantNegative(Op)) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

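// A udiv by a constant power of two is strength-reduced to a logical shift
// right; for example, a udiv by 8 expands to an lshr by 3.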
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
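/// For example, given Base = {(%p + 8),+,4} and Rest = 0, this leaves
/// Base = %p and Rest = 8 + {0,+,4}.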
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
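/// For example, for the base addrec {0,+,%step}<L> this produces a PHI
/// that starts at 0 and is advanced by %step on each backedge, using an
/// add, a sub, or a GEP as appropriate for the expansion type.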
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        const Type *ExpandTy,
                                        const Type *IntTy) {
  // Reuse a previously-inserted PHI, if present.
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(Normalized->getType())) &&
        SE.getSCEV(PN) == Normalized)
      if (BasicBlock *LatchBlock = L->getLoopLatch()) {
        Instruction *IncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

        // Determine if this is a well-behaved chain of instructions leading
        // back to the PHI. It probably will be, if we're scanning an inner
        // loop already visited by LSR for example, but it wouldn't have
        // to be.
        do {
          if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
              (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV))) {
            IncV = 0;
            break;
          }
          // If any of the operands don't dominate the insert position, bail.
          // Addrec operands are always loop-invariant, so this can only happen
          // if there are instructions which haven't been hoisted.
          for (User::op_iterator OI = IncV->op_begin()+1,
               OE = IncV->op_end(); OI != OE; ++OI)
            if (Instruction *OInst = dyn_cast<Instruction>(OI))
              if (!SE.DT->dominates(OInst, IVIncInsertPos)) {
                IncV = 0;
                break;
              }
          if (!IncV)
            break;
          // Advance to the next instruction.
          IncV = dyn_cast<Instruction>(IncV->getOperand(0));
          if (!IncV)
            break;
          if (IncV->mayHaveSideEffects()) {
            IncV = 0;
            break;
          }
        } while (IncV != PN);

        if (IncV) {
          // Ok, the add recurrence looks usable.
          // Remember this PHI, even in post-inc mode.
          InsertedValues.insert(PN);
          // Remember the increment.
          IncV = cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
          rememberInstruction(IncV);
          if (L == IVIncInsertLoop)
            do {
              if (SE.DT->dominates(IncV, IVIncInsertPos))
                break;
              // Make sure the increment is where we want it. But don't move it
              // down past a potential existing post-inc user.
              IncV->moveBefore(IVIncInsertPos);
              IVIncInsertPos = IncV;
              IncV = cast<Instruction>(IncV->getOperand(0));
            } while (IncV != PN);
          return PN;
        }
      }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // Expand code for the step value. Insert instructions right before the
  // terminator corresponding to the back-edge. Do this before creating the PHI
  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
  // negative, insert a sub instead of an add for the increment (unless it's a
  // constant, because subtracts of constants are canonicalized to adds).
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  bool isPointer = ExpandTy->isPointerTy();
  bool isNegative = !isPointer && isNonConstantNegative(Step);
  if (isNegative)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI. If IVIncInsertLoop is
    // non-null and equal to the addrec's loop, insert the instructions
    // at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV;
    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
    if (isPointer) {
      const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
      // If the step isn't constant, don't use an implicitly scaled GEP, because
      // that would require a multiply inside the loop.
      if (!isa<ConstantInt>(StepV))
        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                    GEPPtrTy->getAddressSpace());
      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
      if (IncV->getType() != PN->getType()) {
        IncV = Builder.CreateBitCast(IncV, PN->getType(), "tmp");
        rememberInstruction(IncV);
      }
    } else {
      IncV = isNegative ?
        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
      rememberInstruction(IncV);
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  const Type *STy = S->getType();
  const Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  const Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

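// An n-ary smax is expanded as a chain of icmp sgt and select pairs; for
// example, smax(%a, %b) becomes an "icmp sgt" of the two operands followed
// by a select of the larger one. visitUMaxExpr below does the same with
// unsigned comparisons.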
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  const Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  const Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstNonPHI();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  if (PostIncLoops.empty())
    InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
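/// For example, requesting an i64 canonical IV for a loop yields (creating
/// them if necessary) a header PHI that is 0 on loop entry, together with
/// an "indvar.next" increment of 1 on the back-edge.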
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    const Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}