llvm/msp430.git: lib/Transforms/Scalar/LoopStrengthReduce.cpp
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops
// that use the loop induction variable as one or more of their components.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,   "Number of loop terminating conds optimized");

static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);
namespace {

  struct BasedUser;

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct VISIBILITY_HIDDEN IVExpr {
    SCEVHandle Stride;
    SCEVHandle Base;
    PHINode   *PHI;

    IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
  /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
  struct VISIBILITY_HIDDEN IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };
  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    bool Changed;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<SCEVHandle, IVsOfOneStride> IVsByStride;

    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
    /// reused (nor should they be rewritten to reuse other strides).
    SmallSet<SCEVHandle, 4> StrideNoReuse;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG. However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEVHandle* &CondStride);

    void OptimizeIndvars(Loop *L);
    void OptimizeLoopCountIV(Loop *L);
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);

    /// OptimizeSMax - Rewrite the loop's terminating condition
    /// if it uses an smax computation.
    ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
                           IVStrideUse* &CondUse);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEVHandle *&CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
                               IVExpr&, const Type*,
                               const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
                              IVUsersOfOneStride &Uses,
                              Loop *L,
                              bool &AllUsesAreAddresses,
                              bool &AllUsesAreOutsideLoop,
                              std::vector<BasedUser> &UsersToProcess);
    bool ShouldUseFullStrengthReductionMode(
                               const std::vector<BasedUser> &UsersToProcess,
                               const Loop *L,
                               bool AllUsesAreAddresses,
                               SCEVHandle Stride);
    void PrepareToStrengthReduceFully(
                               std::vector<BasedUser> &UsersToProcess,
                               SCEVHandle Stride,
                               SCEVHandle CommonExprs,
                               const Loop *L,
                               SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                               std::vector<BasedUser> &UsersToProcess,
                               Value *CommonBaseV,
                               const IVExpr &ReuseIV,
                               Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                               std::vector<BasedUser> &UsersToProcess,
                               SCEVHandle Stride,
                               SCEVHandle CommonExprs,
                               Value *CommonBaseV,
                               Instruction *IVIncInsertPt,
                               const Loop *L,
                               SCEVExpander &PreheaderRewriter);
    void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L);
    void DeleteTriviallyDeadInstructions();
  };
}
char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.back());
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}
/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L. An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
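// Illustrative example (not from the original source): if L1 and L2 are
// sibling loops, then querying the addrec {0,+,4}<L2> with L = L1 returns
// true, while {0,+,4}<L1>, or an addrec of a loop enclosing L1, returns
// false.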
static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) {
  // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned int i = 0; i < AE->getNumOperands(); i++)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // if newLoop is an outer loop of L, this is OK.
      if (!LoopInfoBase<BasicBlock>::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}
/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *UseTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    UseTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      UseTy = II->getOperand(1)->getType();
      break;
    }
  }
  return UseTy;
}
namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use. As the use is processed, information gets moved from this
    /// field to the Imm field (below). BasedUser values are sorted by this
    /// field.
    SCEVHandle Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// isSigned - The stride (and thus also the Base) of this use may be in
    /// a narrower type than the use itself (OperandValToReplace->getType()).
    /// When this is the case, the isSigned field indicates whether the
    /// IV expression should be signed-extended instead of zero-extended to
    /// fit the type of the use.
    bool isSigned;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction. This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    SCEVHandle Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        isSigned(IVSU.isSigned()),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        SmallVectorImpl<WeakVH> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L);
    void dump() const;
  };
}
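// Illustrative example (not from the original source): for a use whose
// address computes (%p + 4*i + 16) with stride 4, Base initially carries the
// whole offset expression; MoveImmediateValues may later move the 16 into
// Imm when it can be folded into the target's addressing mode.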
void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  cerr << "   Inst: " << *Inst;
}
Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L) {
  // Figure out where we *really* want to insert this code. In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user, we'd rather pull it out as
  // many loops as possible.
  LoopInfo &LI = Rewriter.getLoopInfo();
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  Value *Base = Rewriter.expandCodeFor(NewBase, NewBase->getType(),
                                       BaseInsertPt);

  SCEVHandle NewValSCEV = SE->getUnknown(Base);

  // If there is no immediate value, skip the next part.
  if (!Imm->isZero()) {
    // If we are inserting the base and imm values in the same block, make sure
    // to adjust the IP position if insertion reused a result.
    if (IP == BaseInsertPt)
      IP = Rewriter.getInsertionPoint();

    // Always emit the immediate (if non-zero) into the same block as the user.
    NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);
  }

  if (isSigned)
    NewValSCEV = SE->getTruncateOrSignExtend(NewValSCEV, Ty);
  else
    NewValSCEV = SE->getTruncateOrZeroExtend(NewValSCEV, Ty);

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<WeakVH> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users. Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation. This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block. However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, L);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DOUT << "      Replacing with ";
    DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
    DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
    return;
  }

  // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it. Note that PHI nodes can
  // have multiple entries for the same predecessor. We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI. This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI. We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case(?).
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths. We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        BasicBlock *PHIPred = PN->getIncomingBlock(i);
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

          // Next step: move the basic block. In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
            BasicBlock *NewBB = PN->getIncomingBlock(i);
            NewBB->moveBefore(PN->getParent());
          }

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
        }
      }
      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PN->getIncomingBlock(i)->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, L);

        DOUT << "      Changing PHI use to ";
        DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
        DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}
/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, UseTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, UseTy);
      } else {
        // Default: assume global addresses are not legal.
      }
    }

  return false;
}
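// Illustrative example (not from the original source): passing a
// SCEVConstant of 40 asks the target whether [reg + 40] is a legal
// addressing mode for UseTy; with no TargetLowering available, only the
// conservative default immediate range above is accepted.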
/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return; // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
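// For illustration: with Val = (A + B) where A is invariant in L and B is
// not, B is accumulated into Imm and Val is rebuilt as just A; if nothing in
// Val is invariant, Val collapses to 0 and everything moves into Imm.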
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *UseTy,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      SCEVHandle NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, UseTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveImmediateValues(TLI, UseTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
      SCEVHandle NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, UseTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8". If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, UseTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, UseTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}
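// Worked example of the multiply case above, assuming a target where
// [reg + 32] is legal: Val = 8*(4 + v) recurses into (4 + v), pulls the 4
// out as SubImm, scales it to 32, verifies that 32 still fits, and leaves
// Val = 8*v with Imm increased by 32.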
static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *UseTy = getAccessType(User);
  MoveImmediateValues(TLI, UseTy, Val, Imm, isAddress, L, SE);
}
/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together. This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
                             SCEVHandle Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}
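// For illustration: Expr = (A + B + {Start,+,S}<L>) separates into A, B,
// {0,+,S}<L>, and then whatever Start itself separates into; a zero
// subexpression is never added to the list.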
// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };

/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything. This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression. The common expression
/// is *removed* from the Bases and returned.
static SCEVHandle
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use? This is a very common case, so we handle it specially and
  // cheaply.
  SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  SCEVHandle Result = Zero;
  SCEVHandle FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's. If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<SCEVHandle, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  std::vector<SCEVHandle> UniqueSubExprs;

  std::vector<SCEVHandle> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV). We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the UseTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *UseTy = 0;
    if (isAddrUse)
      UseTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], UseTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result. Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<SCEVHandle, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop. However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers). Check this.
    for (unsigned i = 0; i < NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *UseTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, UseTy, TLI, Result != Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not. There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData. This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<SCEVHandle, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does. Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
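// For illustration: with two in-loop uses whose bases are (a+b+c) and
// (a+c+d), the shared (a+c) is counted in every use, returned as Result, and
// the bases are rewritten to b and d; had (a+c) folded "for free" into every
// use's addressing mode, it would have stayed in the bases instead.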
/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                               int64_t Offset,
                               int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a nop.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}
/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible. Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within. Well, usually.
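// Illustrative example (not from the original source): if an IV of stride 4
// already exists and the current stride is 16, Scale is 16/4 = 4; when the
// target can fold [base + 4*reg] for every use (see ValidScale), the
// stride-16 users are rewritten against the stride-4 IV and 4 is returned.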
SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEVHandle &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (StrideNoReuse.count(Stride))
    return SE->getIntegerSCEV(0, Stride->getType());

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
          StrideNoReuse.count(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is really really right to substitute
    // an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case, old IV is -1*x and this one is x. Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }

  return SE->getIntegerSCEV(0, Stride->getType());
}
/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
// CollectIVUsers - Transform our list of users and offsets to a bit more
// complex table. In this new vector, each 'BasedUser' contains 'Base', the base
// of the strided accesses, as well as the old information from Uses. We
// progressively move information from the Base field to the Imm field, until
// we eventually have the full access expression to rewrite the use.
SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
                                              IVUsersOfOneStride &Uses,
                                              Loop *L,
                                              bool &AllUsesAreAddresses,
                                              bool &AllUsesAreOutsideLoop,
                                       std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
       E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases. We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible. Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  SCEVHandle CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions. If we can represent anything there, move it to the imm
  // fields of the BasedUsers. We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV. Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores. Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}
/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
///
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                SCEVHandle Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *UseTy = getAccessType(Inst);
        SCEVHandle Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, UseTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}
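// For illustration (under -enable-full-lsr): two address users with bases B
// and B+8 each receive their own IV in PrepareToStrengthReduceFully, so the
// loop advances two pointers directly rather than re-deriving B+8 from one
// shared IV on every iteration.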
/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the step is a negated expression (see isNonConstantNegative), the
/// stride is negated and a subtract is used for the increment instead of
/// an add.
///
/// Return the created phi node.
///
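// For illustration, with Start = 0 and Step = 4 this emits roughly:
//   header:  %lsr.iv = phi [ 0, %preheader ], [ %lsr.iv.next, %latch ]
//   latch:   %lsr.iv.next = add %lsr.iv, 4
// (with a sub instead of an add when the step is a non-constant negative
// value, per the isNegative path below).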
static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  SCEVHandle IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}
static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first. To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together. By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't. The problem is that
  // SCEVHandle's don't have a deterministic ordering w.r.t to each other, so
  // we don't have anything to do a '<' comparison on. Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // ==.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    SCEVHandle Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}
/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                        std::vector<BasedUser> &UsersToProcess,
                                        SCEVHandle Stride,
                                        SCEVHandle CommonExprs,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DOUT << "  Fully reducing all users\n";

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    SCEVHandle Imm = UsersToProcess[i].Imm;
    SCEVHandle Base = UsersToProcess[i].Base;
    SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
    do {
      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
      UsersToProcess[i].Phi = Phi;
      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
             "ShouldUseFullStrengthReductionMode should reject this!");
    } while (++i != e && Base == UsersToProcess[i].Base);
  }
}
/// FindIVIncInsertPt - Return the location to insert the increment instruction.
/// If the only use is a use of the postinc value (it must be the loop
/// termination condition), then insert it just before the use.
static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
                                      const Loop *L) {
  if (UsersToProcess.size() == 1 &&
      UsersToProcess[0].isUseOfPostIncrementedValue &&
      L->contains(UsersToProcess[0].Inst->getParent()))
    return UsersToProcess[0].Inst;
  return L->getLoopLatch()->getTerminator();
}
/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
/// given users to share.
///
void
LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                        std::vector<BasedUser> &UsersToProcess,
                                        SCEVHandle Stride,
                                        SCEVHandle CommonExprs,
                                        Value *CommonBaseV,
                                        Instruction *IVIncInsertPt,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DOUT << "  Inserting new PHI:\n";

  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
                                 Stride, IVIncInsertPt, L,
                                 PreheaderRewriter);

  // Remember this in case a later stride is a multiple of this one.
  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);

  // All the users will share this new IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = Phi;

  DOUT << "    IV=";
  DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
  DOUT << "\n";
}
/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
/// reuse an induction variable with a stride that is a factor of the current
/// induction variable.
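///
/// For example, if an IV with stride 2 already exists, users of a stride-4
/// IV can be rewritten in terms of it with a RewriteFactor of 2: in effect
/// a[i4] becomes a[i2*2], letting the multiply fold into the addressing
/// mode's scale field.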
void
LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
                                         std::vector<BasedUser> &UsersToProcess,
                                         Value *CommonBaseV,
                                         const IVExpr &ReuseIV,
                                         Instruction *PreInsertPt) {
  DOUT << "  Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
       << " and BASE " << *ReuseIV.Base << "\n";

  // All the users will share the reused IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = ReuseIV.PHI;

  Constant *C = dyn_cast<Constant>(CommonBaseV);
  if (C &&
      (!C->isNullValue() &&
       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
                         TLI, false)))
    // We want the common base emitted into the preheader! This is just
    // using cast as a copy so BitCast (no-op cast) is appropriate
    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                  "commonbase", PreInsertPt);
}

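/// IsImmFoldedIntoAddrMode - Return true if the target's addressing modes can
/// absorb the given global and/or constant offset for every pre-increment
/// use, i.e. the immediate could be sunk back into the individual uses.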
static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
                                    const Type *AccessTy,
                                    std::vector<BasedUser> &UsersToProcess,
                                    const TargetLowering *TLI) {
  SmallVector<Instruction*, 16> AddrModeInsts;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    if (UsersToProcess[i].isUseOfPostIncrementedValue)
      continue;
    ExtAddrMode AddrMode =
      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
                                   AccessTy, UsersToProcess[i].Inst,
                                   AddrModeInsts, *TLI);
    if (GV && GV != AddrMode.BaseGV)
      return false;
    if (Offset && !AddrMode.BaseOffs)
      // FIXME: How do we accurately check whether the immediate offset
      // actually folded?
      return false;
    AddrModeInsts.clear();
  }
  return true;
}

/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV.  All of the users may have different starting values, and this
/// may not be the only stride.
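///
/// In outline: collect the users and factor out their common base, sort so
/// that equal bases are adjacent, optionally sink immediates back into
/// address uses, choose a strategy (full reduction, one new PHI, or reuse
/// of a smaller-stride IV), and finally rewrite each use.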
void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                                      IVUsersOfOneStride &Uses,
                                                      Loop *L) {
  // If all the users are moved to another stride, then there is nothing to do.
  if (Uses.Users.empty())
    return;

  // Keep track if every use in UsersToProcess is an address. If they all are,
  // we may be able to rewrite the entire collection of them in terms of a
  // smaller-stride IV.
  bool AllUsesAreAddresses = true;

  // Keep track if every use of a single stride is outside the loop.  If so,
  // we want to be more aggressive about reusing a smaller-stride IV; a
  // multiply outside the loop is better than another IV inside.  Well, usually.
  bool AllUsesAreOutsideLoop = true;

  // Transform our list of users and offsets to a bit more complex table.  In
  // this new vector, each 'BasedUser' contains 'Base', the base of the
  // strided access, as well as the old information from Uses.  We progressively
  // move information from the Base field to the Imm field, until we eventually
  // have the full access expression to rewrite the use.
  std::vector<BasedUser> UsersToProcess;
  SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                          AllUsesAreOutsideLoop,
                                          UsersToProcess);

  // Sort the UsersToProcess array so that users with common bases are
  // next to each other.
  SortUsersToProcess(UsersToProcess);

  // If we managed to find some expressions in common, we'll need to carry
  // their value in a register and add it in for each use.  This will take up
  // a register operand, which potentially restricts what stride values are
  // valid.
  bool HaveCommonExprs = !CommonExprs->isZero();
  const Type *ReplacedTy = CommonExprs->getType();

  // If all uses are addresses, consider sinking the immediate part of the
  // common expression back into uses if they can fit in the immediate fields.
  if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
    SCEVHandle NewCommon = CommonExprs;
    SCEVHandle Imm = SE->getIntegerSCEV(0, ReplacedTy);
    MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
    if (!Imm->isZero()) {
      bool DoSink = true;

      // If the immediate part of the common expression is a GV, check if it's
      // possible to fold it into the target addressing mode.
      GlobalValue *GV = 0;
      if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
        GV = dyn_cast<GlobalValue>(SU->getValue());
      int64_t Offset = 0;
      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
        Offset = SC->getValue()->getSExtValue();
      if (GV || Offset)
        // Pass VoidTy as the AccessTy to be conservative, because
        // there could be multiple access types among all the uses.
        DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy,
                                         UsersToProcess, TLI);

      if (DoSink) {
        DOUT << "  Sinking " << *Imm << " back down into uses\n";
        for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
          UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
        CommonExprs = NewCommon;
        HaveCommonExprs = !CommonExprs->isZero();
        ++NumImmSunk;
      }
    }
  }

  // Now that we know what we need to do, insert the PHI node itself.
  //
  DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
       << *Stride << ":\n"
       << "  Common base: " << *CommonExprs << "\n";

  SCEVExpander Rewriter(*SE, *LI);
  SCEVExpander PreheaderRewriter(*SE, *LI);

  BasicBlock  *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  BasicBlock *LatchBlock = L->getLoopLatch();
  Instruction *IVIncInsertPt = LatchBlock->getTerminator();

  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);

  SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
  IVExpr   ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
                   SE->getIntegerSCEV(0, Type::Int32Ty),
                   0);

  /// Choose a strength-reduction strategy and prepare for it by creating
  /// the necessary PHIs and adjusting the bookkeeping.
  if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
                                         AllUsesAreAddresses, Stride)) {
    PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
                                 PreheaderRewriter);
  } else {
    // Emit the initial base value into the loop preheader.
    CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
                                                  PreInsertPt);

    // If all uses are addresses, check if it is possible to reuse an IV.  The
    // new IV must have a stride that is a multiple of the old stride; the
    // multiple must be a number that can be encoded in the scale field of the
    // target addressing mode; and we must have a valid instruction after this
    // substitution, including the immediate field, if any.
    RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
                                    AllUsesAreOutsideLoop,
                                    Stride, ReuseIV, ReplacedTy,
                                    UsersToProcess);
    if (!RewriteFactor->isZero())
      PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
                                               ReuseIV, PreInsertPt);
    else {
      IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
                                        CommonBaseV, IVIncInsertPt,
                                        L, PreheaderRewriter);
    }
  }

  // Process all the users now, replacing their strided uses with
  // strength-reduced forms.  This outer loop handles all bases, the inner
  // loop handles all users of a particular base.
  while (!UsersToProcess.empty()) {
    SCEVHandle Base = UsersToProcess.back().Base;
    Instruction *Inst = UsersToProcess.back().Inst;

    // Emit the code for Base into the preheader.
    Value *BaseV = 0;
    if (!Base->isZero()) {
      BaseV = PreheaderRewriter.expandCodeFor(Base, Base->getType(),
                                              PreInsertPt);

      DOUT << "  INSERTING code for BASE = " << *Base << ":";
      if (BaseV->hasName())
        DOUT << " Result value name = %" << BaseV->getNameStr();
      DOUT << "\n";

      // If BaseV is a non-zero constant, make sure that it gets inserted into
      // the preheader, instead of being forward substituted into the uses.  We
      // do this by forcing a BitCast (noop cast) to be inserted into the
      // preheader in this case.
      if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false)) {
        // We want this constant emitted into the preheader! This is just
        // using cast as a copy so BitCast (no-op cast) is appropriate
        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
                                PreInsertPt);
      }
    }

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instructions that we identified as using this stride and base.
    do {
      // FIXME: Use emitted users to emit other users.
      BasedUser &User = UsersToProcess.back();

      DOUT << "    Examining ";
      if (User.isUseOfPostIncrementedValue)
        DOUT << "postinc";
      else
        DOUT << "preinc";
      DOUT << " use ";
      DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
                           /*PrintType=*/false));
      DOUT << " in Inst: " << *(User.Inst);

      // If this instruction wants to use the post-incremented value, move it
      // after the post-inc and use its value instead of the PHI.
      Value *RewriteOp = User.Phi;
      if (User.isUseOfPostIncrementedValue) {
        RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
        // If this user is in the loop, make sure it is the last thing in the
        // loop to ensure it is dominated by the increment. In case it's the
        // only use of the iv, the increment instruction is already before the
        // use.
        if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
          User.Inst->moveBefore(IVIncInsertPt);
      }

      SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);

      if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
          SE->getEffectiveSCEVType(ReplacedTy)) {
        assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
               SE->getTypeSizeInBits(ReplacedTy) &&
               "Unexpected widening cast!");
        RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
      }

      // If we had to insert new instructions for RewriteOp, we have to
      // consider that they may not have been able to end up immediately
      // next to RewriteOp, because non-PHI instructions may never precede
      // PHI instructions in a block. In this case, remember where the last
      // instruction was inserted so that if we're replacing a different
      // PHI node, we can use the later point to expand the final
      // RewriteExpr.
      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
      if (RewriteOp == User.Phi) NewBasePt = 0;

      // Clear the SCEVExpander's expression map so that we are guaranteed
      // to have the code emitted where we expect it.
      Rewriter.clear();

      // If we are reusing the iv, then it must be multiplied by a constant
      // factor to take advantage of the addressing mode scale component.
      if (!RewriteFactor->isZero()) {
        // If we're reusing an IV with a nonzero base (currently this happens
        // only when all reuses are outside the loop) subtract that base here.
        // The base has been used to initialize the PHI node but we don't want
        // it here.
        if (!ReuseIV.Base->isZero()) {
          SCEVHandle typedBase = ReuseIV.Base;
          if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
              SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
            // It's possible the original IV is a larger type than the new IV,
            // in which case we have to truncate the Base.  We checked in
            // RequiresTypeConversion that this is valid.
            assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
                   SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
                   "Unexpected lengthening conversion!");
            typedBase = SE->getTruncateExpr(ReuseIV.Base,
                                            RewriteExpr->getType());
          }
          RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
        }

        // Multiply old variable, with base removed, by new scale factor.
        RewriteExpr = SE->getMulExpr(RewriteFactor,
                                     RewriteExpr);

        // The common base is emitted in the loop preheader. But since we
        // are reusing an IV, it has not been used to initialize the PHI node.
        // Add it to the expression used to rewrite the uses.
        // When this use is outside the loop, we earlier subtracted the
        // common base, and are adding it back here.  Use the same expression
        // as before, rather than CommonBaseV, so DAGCombiner will zap it.
        if (!CommonExprs->isZero()) {
          if (L->contains(User.Inst->getParent()))
            RewriteExpr = SE->getAddExpr(RewriteExpr,
                                         SE->getUnknown(CommonBaseV));
          else
            RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
        }
      }

      // Now that we know what we need to do, insert code before User for the
      // immediate and any loop-variant expressions.
      if (BaseV)
        // Add BaseV to the PHI value if needed.
        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));

      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
                                          Rewriter, L, this,
                                          DeadInsts);

      // Mark old value we replaced as possibly dead, so that it is eliminated
      // if we just replaced the last use of that value.
      DeadInsts.push_back(User.OperandValToReplace);

      UsersToProcess.pop_back();
      ++NumReduced;

      // If there are any more users to process with the same base, process them
      // now.  We sorted by base above, so we just have to check the last elt.
    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                                           const SCEVHandle *&CondStride) {
  for (unsigned Stride = 0, e = IU->StrideOrder.size();
       Stride != e && !CondUse; ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");

    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
           E = SI->second->Users.end(); UI != E; ++UI)
      if (UI->getUser() == Cond) {
        // NOTE: we could handle setcc instructions with multiple uses here, but
        // InstCombine does it as well for simple uses, it's not clear that it
        // occurs enough in real life to handle.
        CondUse = UI;
        CondStride = &SI->first;
        return true;
      }
  }
  return false;
}

namespace {
  // Constant strides come first, and are in turn sorted by their absolute
  // values. If the absolute values are the same, then positive strides come
  // first. e.g.
  // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
  struct StrideCompare {
    const ScalarEvolution *SE;
    explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}

    bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
      const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
      const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
      if (LHSC && RHSC) {
        int64_t  LV = LHSC->getValue()->getSExtValue();
        int64_t  RV = RHSC->getValue()->getSExtValue();
        uint64_t ALV = (LV < 0) ? -LV : LV;
        uint64_t ARV = (RV < 0) ? -RV : RV;
        if (ALV == ARV) {
          if (LV != RV)
            return LV > RV;
        } else {
          return ALV < ARV;
        }

        // If it's the same value but different type, sort by bit width so
        // that we emit larger induction variables before smaller
        // ones, letting the smaller be re-written in terms of larger ones.
        return SE->getTypeSizeInBits(RHS->getType()) <
               SE->getTypeSizeInBits(LHS->getType());
      }
      return LHSC && !RHSC;
    }
  };
}

/// ChangeCompareStride - If a loop termination compare instruction is the
/// only use of its stride, and the comparison is against a constant value,
/// try to eliminate the stride by moving the compare instruction to another
/// stride and changing its constant operand accordingly. e.g.
///
/// loop:
/// ...
/// v1 = v1 + 3
/// v2 = v2 + 1
/// if (v2 < 10) goto loop
/// =>
/// loop:
/// ...
/// v1 = v1 + 3
/// if (v1 < 30) goto loop
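///
/// The new compare constant is the old one scaled by the ratio of the two
/// strides (30 = 10 * 3 above), so the rewrite is only done when that
/// multiplication cannot overflow.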
ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                                  IVStrideUse* &CondUse,
                                                  const SCEVHandle* &CondStride) {
  // If there's only one stride in the loop, there's nothing to do here.
  if (IU->StrideOrder.size() < 2)
    return Cond;
  // If there are other users of the condition's stride, don't bother
  // trying to change the condition because the stride will still
  // remain.
  std::map<SCEVHandle, IVUsersOfOneStride *>::iterator I =
    IU->IVUsesByStride.find(*CondStride);
  if (I == IU->IVUsesByStride.end() ||
      I->second->Users.size() != 1)
    return Cond;
  // Only handle constant strides for now.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
  if (!SC) return Cond;

  ICmpInst::Predicate Predicate = Cond->getPredicate();
  int64_t CmpSSInt = SC->getValue()->getSExtValue();
  unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
  uint64_t SignBit = 1ULL << (BitWidth-1);
  const Type *CmpTy = Cond->getOperand(0)->getType();
  const Type *NewCmpTy = NULL;
  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
  unsigned NewTyBits = 0;
  SCEVHandle *NewStride = NULL;
  Value *NewCmpLHS = NULL;
  Value *NewCmpRHS = NULL;
  int64_t Scale = 1;
  SCEVHandle NewOffset = SE->getIntegerSCEV(0, CmpTy);

  if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
    int64_t CmpVal = C->getValue().getSExtValue();

    // Check the stride constant's and the comparison constant's signs to
    // detect overflow.
    if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
      return Cond;

    // Look for a suitable stride / iv as replacement.
    for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
      std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
        IU->IVUsesByStride.find(IU->StrideOrder[i]);
      if (!isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SSInt == CmpSSInt ||
          abs64(SSInt) < abs64(CmpSSInt) ||
          (SSInt % CmpSSInt) != 0)
        continue;

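      // Scale is the ratio of the two strides, and the compare constant must
      // be scaled by the same amount (e.g. 10 becomes 30 when moving from the
      // stride-1 to the stride-3 IV in the example above).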
      Scale = SSInt / CmpSSInt;
      int64_t NewCmpVal = CmpVal * Scale;
      APInt Mul = APInt(BitWidth*2, CmpVal, true);
      Mul = Mul * APInt(BitWidth*2, Scale, true);
      // Check for overflow.
      if (!Mul.isSignedIntN(BitWidth))
        continue;
      // Check for overflow in the stride's type too.
      if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
        continue;

      // Watch out for overflow.
      if (ICmpInst::isSignedPredicate(Predicate) &&
          (CmpVal & SignBit) != (NewCmpVal & SignBit))
        continue;

      if (NewCmpVal == CmpVal)
        continue;
      // Pick the best iv to use trying to avoid a cast.
      NewCmpLHS = NULL;
      for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
             E = SI->second->Users.end(); UI != E; ++UI) {
        Value *Op = UI->getOperandValToReplace();

        // If the IVStrideUse implies a cast, check for an actual cast which
        // can be used to find the original IV expression.
        if (SE->getEffectiveSCEVType(Op->getType()) !=
            SE->getEffectiveSCEVType(SI->first->getType())) {
          CastInst *CI = dyn_cast<CastInst>(Op);
          // If it's not a simple cast, it's complicated.
          if (!CI)
            continue;
          // If it's a cast from a type other than the stride type,
          // it's complicated.
          if (CI->getOperand(0)->getType() != SI->first->getType())
            continue;
          // Ok, we found the IV expression in the stride's type.
          Op = CI->getOperand(0);
        }

        NewCmpLHS = Op;
        if (NewCmpLHS->getType() == CmpTy)
          break;
      }
      if (!NewCmpLHS)
        continue;

      NewCmpTy = NewCmpLHS->getType();
      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
      const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
        // Check if it is possible to rewrite it using
        // an iv / stride of a smaller integer type.
        unsigned Bits = NewTyBits;
        if (ICmpInst::isSignedPredicate(Predicate))
          --Bits;
        uint64_t Mask = (1ULL << Bits) - 1;
        if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
          continue;
      }

      // Don't rewrite if the use offset is non-constant and the new type
      // differs from the old.
      // FIXME: too conservative?
      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
        continue;

      bool AllUsesAreAddresses = true;
      bool AllUsesAreOutsideLoop = true;
      std::vector<BasedUser> UsersToProcess;
      SCEVHandle CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                              AllUsesAreAddresses,
                                              AllUsesAreOutsideLoop,
                                              UsersToProcess);
      // Avoid rewriting the compare instruction with an iv of new stride
      // if it's likely the new stride uses will be rewritten using the
      // stride of the compare instruction.
      if (AllUsesAreAddresses &&
          ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
        continue;

      // If scale is negative, use swapped predicate unless it's testing
      // for equality.
      if (Scale < 0 && !Cond->isEquality())
        Predicate = ICmpInst::getSwappedPredicate(Predicate);

      NewStride = &IU->StrideOrder[i];
      if (!isa<PointerType>(NewCmpTy))
        NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
      else {
        ConstantInt *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
        NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
      }
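      // Scale the IV use's offset into the new stride's terms; when the
      // types differ, fold the scaled offset into a constant of the new
      // comparison's integer type instead.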
      NewOffset = TyBits == NewTyBits
        ? SE->getMulExpr(CondUse->getOffset(),
                         SE->getConstant(ConstantInt::get(CmpTy, Scale)))
        : SE->getConstant(ConstantInt::get(NewCmpIntTy,
            cast<SCEVConstant>(CondUse->getOffset())->getValue()
              ->getSExtValue()*Scale));
      break;
    }
  }

  // Forgo this transformation if the increment happens to be unfortunately
  // positioned after the condition, and the condition has multiple uses
  // which prevent it from being moved immediately before the branch. See
  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
  // for an example of this situation.
  if (!Cond->hasOneUse()) {
    for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
         I != E; ++I)
      if (I == NewCmpLHS)
        return Cond;
  }

  if (NewCmpRHS) {
    // Create a new compare instruction using new stride / iv.
    ICmpInst *OldCond = Cond;
    // Insert new compare instruction.
    Cond = new ICmpInst(Predicate, NewCmpLHS, NewCmpRHS,
                        L->getHeader()->getName() + ".termcond",
                        OldCond);

    // Remove the old compare instruction. The old indvar is probably dead too.
    DeadInsts.push_back(CondUse->getOperandValToReplace());
    OldCond->replaceAllUsesWith(Cond);
    OldCond->eraseFromParent();

    IU->IVUsesByStride[*NewStride]->addUser(NewOffset, Cond, NewCmpLHS, false);
    CondUse = &IU->IVUsesByStride[*NewStride]->Users.back();
    CondStride = NewStride;
    ++NumEliminated;
    Changed = true;
  }

  return Cond;
}

/// OptimizeSMax - Rewrite the loop's terminating condition if it uses
/// an smax computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// where the comparison is signed, the trip count isn't just 'n', because
/// 'n' could be negative. And unfortunately this can come up even for loops
/// where the user didn't use a C do-while loop. For example, seemingly
/// well-behaved top-test loops will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// signed-max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   smax = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != smax);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting loops of this type and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
                                           IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  SCEVHandle IterationCount = SE->getAddExpr(BackedgeTakenCount, One);

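  // For the pattern above, the trip count is smax(1, n), so both the select
  // feeding the compare and the IV (which starts at 1 with step 1) must
  // match that form.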
  // Check for a max calculation that matches the pattern.
  const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
  if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;

  SCEVHandle SMaxLHS = SMax->getOperand(0);
  SCEVHandle SMaxRHS = SMax->getOperand(1);
  if (!SMaxLHS || SMaxLHS != One) return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(*SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
                   CmpInst::ICMP_SLT :
                   CmpInst::ICMP_SGE,
                 Cond->getOperand(0), NewRHS, "scmp", Cond);

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {

  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
       ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
    if (!isa<SCEVConstant>(SI->first))
      continue;

    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
           E = SI->second->Users.end(); UI != E; /* empty */) {
      ilist<IVStrideUse>::iterator CandidateUI = UI;
      ++UI;
      Instruction *ShadowUse = CandidateUI->getUser();
      const Type *DestTy = NULL;

      /* If shadow use is an int->float cast then insert a second IV
         to eliminate this cast.

           for (unsigned i = 0; i < n; ++i)
             foo((double)i);

         is transformed into

           double d = 0.0;
           for (unsigned i = 0; i < n; ++i, ++d)
             foo(d);
      */
      if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
        DestTy = UCast->getDestTy();
      else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
        DestTy = SCast->getDestTy();
      if (!DestTy) continue;

      if (TLI) {
        // If target does not support DestTy natively then do not apply
        // this transformation.
        MVT DVT = TLI->getValueType(DestTy);
        if (!TLI->isTypeLegal(DVT)) continue;
      }

      PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
      if (!PH) continue;
      if (PH->getNumIncomingValues() != 2) continue;

      const Type *SrcTy = PH->getType();
      int Mantissa = DestTy->getFPMantissaWidth();
      if (Mantissa == -1) continue;
      if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
        continue;

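      // The shadow IV must count exactly, so the integer IV's width has to
      // fit in the FP mantissa (e.g. i32 fits in a double's 53 mantissa
      // bits, but i64 does not).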
      unsigned Entry, Latch;
      if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
        Entry = 0;
        Latch = 1;
      } else {
        Entry = 1;
        Latch = 0;
      }

      ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
      if (!Init) continue;
      ConstantFP *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

      BinaryOperator *Incr =
        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
      if (!Incr) continue;
      if (Incr->getOpcode() != Instruction::Add
          && Incr->getOpcode() != Instruction::Sub)
        continue;

      /* Initialize new IV, double d = 0.0 in above example. */
      ConstantInt *C = NULL;
      if (Incr->getOperand(0) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(1));
      else if (Incr->getOperand(1) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(0));
      else
        continue;

      if (!C) continue;

      /* Add new PHINode. */
      PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

      /* Create new increment. '++d' in above example. */
      ConstantFP *CFP = ConstantFP::get(DestTy, C->getZExtValue());
      BinaryOperator *NewIncr =
        BinaryOperator::Create(Incr->getOpcode(),
                               NewPH, CFP, "IV.S.next.", Incr);

      NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
      NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

      /* Remove cast operation */
      ShadowUse->replaceAllUsesWith(NewPH);
      ShadowUse->eraseFromParent();
      NumShadow++;
      break;
    }
  }
}

// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
// uses in the loop, look to see if we can eliminate some, in favor of using
// common indvars for the different uses.
void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
  // TODO: implement optzns here.

  OptimizeShadowIV(L);
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
  // Finally, get the terminating condition for the loop if possible.  If we
  // can, we want to change it to use a post-incremented version of its
  // induction variable, to allow coalescing the live ranges for the IV into
  // one register value.
  BasicBlock *LatchBlock = L->getLoopLatch();
  BasicBlock *ExitBlock = L->getExitingBlock();
  if (!ExitBlock)
    // Multiple exits, just look at the exit in the latch block if there is one.
    ExitBlock = LatchBlock;
  BranchInst *TermBr = dyn_cast<BranchInst>(ExitBlock->getTerminator());
  if (!TermBr)
    return;
  if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
    return;

  // Search IVUsesByStride to find Cond's IVUse if there is one.
  IVStrideUse *CondUse = 0;
  const SCEVHandle *CondStride = 0;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
  if (!FindIVUserForCond(Cond, CondUse, CondStride))
    return; // setcc doesn't use the IV.

  if (ExitBlock != LatchBlock) {
    if (!Cond->hasOneUse())
      // See below, we don't want the condition to be cloned.
      return;

    // If exiting block is the latch block, we know it's safe and profitable to
    // transform the icmp to use post-inc iv. Otherwise do so only if it would
    // not reuse another iv and its iv would be reused by other uses. We are
    // optimizing for the case where the icmp is the only use of the iv.
    IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[*CondStride];
    for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
         E = StrideUses.Users.end(); I != E; ++I) {
      if (I->getUser() == Cond)
        continue;
      if (!I->isUseOfPostIncrementedValue())
        return;
    }

    // FIXME: This is expensive, and worse still ChangeCompareStride does a
    // similar check. Can we perform all the icmp related transformations after
    // StrengthReduceStridedIVUsers?
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) {
      int64_t SInt = SC->getValue()->getSExtValue();
      for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
           ++NewStride) {
        std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
          IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
        if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
          continue;
        int64_t SSInt =
          cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
        if (SSInt == SInt)
          return; // This can definitely be reused.
        if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
          continue;
        int64_t Scale = SSInt / SInt;
        bool AllUsesAreAddresses = true;
        bool AllUsesAreOutsideLoop = true;
        std::vector<BasedUser> UsersToProcess;
        SCEVHandle CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                                AllUsesAreAddresses,
                                                AllUsesAreOutsideLoop,
                                                UsersToProcess);
        // Avoid rewriting the compare instruction with an iv of new stride
        // if it's likely the new stride uses will be rewritten using the
        // stride of the compare instruction.
        if (AllUsesAreAddresses &&
            ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
          return;
      }
    }

    StrideNoReuse.insert(*CondStride);
  }

  // If the trip count is computed in terms of an smax (due to ScalarEvolution
  // being unable to find a sufficient guard, for example), change the loop
  // comparison to use SLT instead of NE.
  Cond = OptimizeSMax(L, Cond, CondUse);

  // If possible, change stride and operands of the compare instruction to
  // eliminate one stride.
  if (ExitBlock == LatchBlock)
    Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);

  // It's possible for the setcc instruction to be anywhere in the loop, and
  // possible for it to have multiple users.  If it is not immediately before
  // the latch block branch, move it.
  if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
    if (Cond->hasOneUse()) {   // Condition has a single use, just move it.
      Cond->moveBefore(TermBr);
    } else {
      // Otherwise, clone the terminating condition and insert it into the
      // loop end.
      Cond = cast<ICmpInst>(Cond->clone());
      Cond->setName(L->getHeader()->getName() + ".termcond");
      LatchBlock->getInstList().insert(TermBr, Cond);

      // Clone the IVUse, as the old use still exists!
      IU->IVUsesByStride[*CondStride]->addUser(CondUse->getOffset(), Cond,
                                               CondUse->getOperandValToReplace(),
                                               false);
      CondUse = &IU->IVUsesByStride[*CondStride]->Users.back();
    }
  }

  // If we get to here, we know that we can transform the setcc instruction to
  // use the post-incremented version of the IV, allowing us to coalesce the
  // live ranges for the IV correctly.
  CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), *CondStride));
  CondUse->setIsUseOfPostIncrementedValue(true);
  Changed = true;

  ++NumLoopCond;
}

// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
// when to exit the loop is used only for that purpose, try to rearrange things
// so it counts down to a test against zero.
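//
// For example, a counting loop like
//   for (i = 0; i != n; ++i) { ... }
// where i has no other uses can instead run
//   for (i = n; i != 0; --i) { ... }
// since a compare against zero is typically cheaper.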
void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {

  // If the number of times the loop is executed isn't computable, give up.
  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  // Get the terminating condition for the loop if possible (this isn't
  // necessarily in the latch, or a block that's a predecessor of the header).
  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1) return;

  // Okay, there is one exit block.  Try to find the condition that causes the
  // loop to be exited.
  BasicBlock *ExitBlock = ExitBlocks[0];

  BasicBlock *ExitingBlock = 0;
  for (pred_iterator PI = pred_begin(ExitBlock), E = pred_end(ExitBlock);
       PI != E; ++PI)
    if (L->contains(*PI)) {
      if (ExitingBlock == 0)
        ExitingBlock = *PI;
      else
        return; // More than one block exiting!
    }
  assert(ExitingBlock && "No exits from loop, something is broken!");

  // Okay, we've computed the exiting block.  See what condition causes us to
  // exit.
  //
  // FIXME: we should be able to handle switch instructions (with a single exit)
  BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (TermBr == 0) return;
  assert(TermBr->isConditional() && "If unconditional, it can't be in loop!");
  if (!isa<ICmpInst>(TermBr->getCondition()))
    return;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());

  // Handle only tests for equality for the moment, and only stride 1.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ)
    return;
  SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
  if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
    return;

  // Make sure the IV is only used for counting.  Value may be preinc or
  // postinc; 2 uses in either case.
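  // (The pre-inc value is used by the increment and the compare; the
  // post-inc value is used by the phi and the compare.)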
  if (!Cond->getOperand(0)->hasNUses(2))
    return;
  PHINode *phi = dyn_cast<PHINode>(Cond->getOperand(0));
  Instruction *incr;
  if (phi && phi->getParent() == L->getHeader()) {
    // Value tested is preinc.  Find the increment.
    // A CmpInst is not a BinaryOperator; we depend on this.
    Instruction::use_iterator UI = phi->use_begin();
    incr = dyn_cast<BinaryOperator>(UI);
    if (!incr)
      incr = dyn_cast<BinaryOperator>(++UI);
    // 1 use for postinc value, the phi.  Unnecessarily conservative?
    if (!incr || !incr->hasOneUse() || incr->getOpcode() != Instruction::Add)
      return;
  } else {
    // Value tested is postinc.  Find the phi node.
    incr = dyn_cast<BinaryOperator>(Cond->getOperand(0));
    if (!incr || incr->getOpcode() != Instruction::Add)
      return;

    Instruction::use_iterator UI = Cond->getOperand(0)->use_begin();
    phi = dyn_cast<PHINode>(UI);
    if (!phi)
      phi = dyn_cast<PHINode>(++UI);
    // 1 use for preinc value, the increment.
    if (!phi || phi->getParent() != L->getHeader() || !phi->hasOneUse())
      return;
  }

  // Replace the increment with a decrement.
  BinaryOperator *decr =
    BinaryOperator::Create(Instruction::Sub, incr->getOperand(0),
                           incr->getOperand(1), "tmp", incr);
  incr->replaceAllUsesWith(decr);
  incr->eraseFromParent();

  // Substitute endval-startval for the original startval, and 0 for the
  // original endval.  Since we're only testing for equality this is OK even
  // if the computation wraps around.
  BasicBlock *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  int inBlock = L->contains(phi->getIncomingBlock(0)) ? 1 : 0;
  Value *startVal = phi->getIncomingValue(inBlock);
  Value *endVal = Cond->getOperand(1);
  // FIXME: check for the case where both are constant.
  ConstantInt *Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
  BinaryOperator *NewStartVal =
    BinaryOperator::Create(Instruction::Sub, endVal, startVal,
                           "tmp", PreInsertPt);
  phi->setIncomingValue(inBlock, NewStartVal);
  Cond->setOperand(1, Zero);

  Changed = true;
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {

  IU = &getAnalysis<IVUsers>();
  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  SE = &getAnalysis<ScalarEvolution>();
  Changed = false;

  if (!IU->IVUsesByStride.empty()) {
#ifndef NDEBUG
    DOUT << "\nLSR on \"" << L->getHeader()->getParent()->getNameStart()
         << "\" ";
    DEBUG(L->dump());
#endif

    // Sort the StrideOrder so we process larger strides first.
    std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
                     StrideCompare(SE));

    // Optimize induction variables.  Some indvar uses can be transformed to use
    // strides that will be needed for other purposes.  A common example of this
    // is the exit test for the loop, which can often be rewritten to use the
    // computation of some other indvar to decide when to terminate the loop.
    OptimizeIndvars(L);

    // Change loop terminating condition to use the postinc iv when possible
    // and optimize loop terminating compare. FIXME: Move this after
    // StrengthReduceStridedIVUsers?
    OptimizeLoopTermCond(L);

    // FIXME: We can shrink overlarge IV's here.  e.g. if the code has
    // computation in i64 values and the target doesn't support i64, demote
    // the computation to 32-bit if safe.

    // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
    // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
    // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
    // Need to be careful that IV's are all the same type.  Only works for
    // intptr_t indvars.

    // IVsByStride keeps IVs for one particular loop.
    assert(IVsByStride.empty() && "Stale entries in IVsByStride?");

    // Note: this processes each stride/type pair individually.  All users
    // passed into StrengthReduceStridedIVUsers have the same type AND stride.
    // Also, note that we iterate over IVUsesByStride indirectly by using
    // StrideOrder.  This extra layer of indirection makes the ordering of
    // strides deterministic - not dependent on map order.
    for (unsigned Stride = 0, e = IU->StrideOrder.size();
         Stride != e; ++Stride) {
      std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
        IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
      assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
      // FIXME: Generalize to non-affine IV's.
      if (!SI->first->isLoopInvariant(L))
        continue;
      StrengthReduceStridedIVUsers(SI->first, *SI->second, L);
    }
  }

  // After all sharing is done, see if we can adjust the loop to test against
  // zero instead of counting up to a maximum.  This is usually faster.
  OptimizeLoopCountIV(L);

  // We're done analyzing this loop; release all the state we built up for it.
  IVsByStride.clear();
  StrideNoReuse.clear();

  // Clean up after ourselves
  if (!DeadInsts.empty())
    DeleteTriviallyDeadInstructions();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  DeleteDeadPHIs(L->getHeader());

  return Changed;