//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,   "Number of loop terminating conds optimized");
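
// When -enable-full-lsr is set, ShouldUseFullStrengthReductionMode (below) may
// strength-reduce all addresses at a given stride all the way down to raw
// pointer arithmetic.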
static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);

namespace {

  struct BasedUser;

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct VISIBILITY_HIDDEN IVExpr {
    const SCEV *Stride;
    const SCEV *Base;
    PHINode    *PHI;

    IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions
  /// inserted during StrengthReduceStridedIVUsers for a particular stride of
  /// the IV.
  struct VISIBILITY_HIDDEN IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };

  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    bool Changed;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<const SCEV *, IVsOfOneStride> IVsByStride;

    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
    /// reused (nor should they be rewritten to reuse other strides).
    SmallSet<const SCEV *, 4> StrideNoReuse;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG. However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEV *const * &CondStride);

    void OptimizeIndvars(Loop *L);
    void OptimizeLoopCountIV(Loop *L);
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);

    /// OptimizeMax - Rewrite the loop's terminating condition
    /// if it uses a max computation.
    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
                          IVStrideUse* &CondUse);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEV *const * &CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&,
                                IVExpr&, const Type*,
                                const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    const SCEV *CollectIVUsers(const SCEV *const &Stride,
                               IVUsersOfOneStride &Uses,
                               Loop *L,
                               bool &AllUsesAreAddresses,
                               bool &AllUsesAreOutsideLoop,
                               std::vector<BasedUser> &UsersToProcess);
    bool ShouldUseFullStrengthReductionMode(
                    const std::vector<BasedUser> &UsersToProcess,
                    const Loop *L,
                    bool AllUsesAreAddresses,
                    const SCEV *Stride);
    void PrepareToStrengthReduceFully(
                    std::vector<BasedUser> &UsersToProcess,
                    const SCEV *Stride,
                    const SCEV *CommonExprs,
                    const Loop *L,
                    SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                    std::vector<BasedUser> &UsersToProcess,
                    Value *CommonBaseV,
                    const IVExpr &ReuseIV,
                    Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                    std::vector<BasedUser> &UsersToProcess,
                    const SCEV *Stride,
                    const SCEV *CommonExprs,
                    Value *CommonBaseV,
                    Instruction *IVIncInsertPt,
                    const Loop *L,
                    SCEVExpander &PreheaderRewriter);
    void StrengthReduceStridedIVUsers(const SCEV *const &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L);
    void DeleteTriviallyDeadInstructions();
  };
}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.back());
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;
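
    // Null out this instruction's uses of its operands first; an operand that
    // is used only here becomes use-empty and gets queued for deletion too.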
    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}

/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L. An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
  // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned int i = 0; i < AE->getNumOperands(); i++)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // If newLoop is an outer loop of L, this is OK.
      if (!LoopInfo::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }
  return AccessTy;
}

namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use. As the use is processed, information gets moved from this
    /// field to the Imm field (below). BasedUser values are sorted by this
    /// field.
    const SCEV *Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction. This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    const SCEV *Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        LoopInfo &LI,
                                        SmallVectorImpl<WeakVH> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L,
                                       LoopInfo &LI);
    void dump() const;
  };
}

void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  cerr << " Inst: " << *Inst;
}

Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L,
                                              LoopInfo &LI) {
  // Figure out where we *really* want to insert this code. In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user; we'd rather pull it out as
  // many loops as possible.
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);

  const SCEV *NewValSCEV = SE->getUnknown(Base);

  // Always emit the immediate into the same block as the user.
  NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}

// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
                                               Instruction *NewBasePt,
                                               SCEVExpander &Rewriter, Loop *L, Pass *P,
                                               LoopInfo &LI,
                                               SmallVectorImpl<WeakVH> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users. Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation. This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block. However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }

    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, L, LI);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DOUT << " Replacing with ";
    DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
    DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
    return;
  }

  // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it. Note that PHI nodes can
  // have multiple entries for the same predecessor. We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI. This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI. We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case(?).
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths. We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        BasicBlock *PHIPred = PN->getIncomingBlock(i);
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

          // Next step: move the basic block. In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
            BasicBlock *NewBB = PN->getIncomingBlock(i);
            NewBB->moveBefore(PN->getParent());
          }

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
        }
      }

      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PN->getIncomingBlock(i)->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, L, LI);

        DOUT << " Changing PHI use to ";
        DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
        DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}

/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, AccessTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, AccessTy);
      } else {
        // Default: assume global addresses are not legal.
      }
    }

  return false;
}

/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
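///
/// For example, if A is loop-invariant in L and N varies within L, then for
/// Val = (A + N) this leaves Val = A and accumulates N into Imm.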
static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}

/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *AccessTy,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      const SCEV *NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress &&
        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
      const SCEV *NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8". If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *AccessTy = getAccessType(User);
  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
}

/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together. This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
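///
/// For example, (A + B + {0,+,S}) decomposes into A, B, and {0,+,S}, while an
/// addrec with a nonzero start, {X,+,S}, decomposes into {0,+,S} plus the
/// subexpressions of X.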
static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
                             const SCEV *Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));
    }

    SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}

// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };

/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything. This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression. The common expression
/// is *removed* from the Bases and returned.
static const SCEV *
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use? This is a very common case, so we handle it specially and
  // cheaply.
  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  const SCEV *Result = Zero;
  const SCEV *FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's. If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  SmallVector<const SCEV *, 16> UniqueSubExprs;

  SmallVector<const SCEV *, 16> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV). We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the AccessTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *AccessTy = 0;
    if (isAddrUse)
      AccessTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result. Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<const SCEV *, SubExprUseData>::iterator I =
      SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop. However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers). Check this.
    for (unsigned i = 0; i < NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *AccessTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result != Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not. There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData. This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<const SCEV *, SubExprUseData>::iterator I =
        SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does. Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}

/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
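///
/// For example, x86 scaled-index addressing supports scales of 1, 2, 4, and 8,
/// so a Scale of 8 can be accepted there while a Scale of 3 cannot.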
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                                    const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                                     int64_t Offset,
                                     int64_t Scale,
                                     const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a nop.
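///
/// For example, an i32 -> i64 conversion requires a real instruction, while
/// i32 -> i32 is trivially free and a truncate is considered free when the
/// target's isTruncateFree hook says so.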
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}

/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible. Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within. Well, usually.
const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEV *const &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (StrideNoReuse.count(Stride))
    return SE->getIntegerSCEV(0, Stride->getType());

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
          StrideNoReuse.count(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
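      // For example, if an IV with stride 4 (SSInt) already exists and the
      // current stride is 8 (SInt), then Scale is 2 and this stride's users
      // can be rewritten in terms of the stride-4 IV multiplied by 2.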
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is almost always right to substitute
    // an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case, old IV is -1*x and this one is x. Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }
  return SE->getIntegerSCEV(0, Stride->getType());
}

/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *const &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// CollectIVUsers - Transform our list of users and offsets to a bit more
/// complex table. In this new vector, each 'BasedUser' contains 'Base', the
/// base of the strided accesses, as well as the old information from Uses. We
/// progressively move information from the Base field to the Imm field, until
/// we eventually have the full access expression to rewrite the use.
const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride,
                                               IVUsersOfOneStride &Uses,
                                               Loop *L,
                                               bool &AllUsesAreAddresses,
                                               bool &AllUsesAreOutsideLoop,
                                               std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
       E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases. We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible. Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  const SCEV *CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions. If we can represent anything there, move it to the imm
  // fields of the BasedUsers. We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV. Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores. Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}

/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                   const std::vector<BasedUser> &UsersToProcess,
                                   const Loop *L,
                                   bool AllUsesAreAddresses,
                                   const SCEV *Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *AccessTy = getAccessType(Inst);
        const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}

/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the stride is negative, the increment is emitted as a subtract of the
/// negated stride instead of an add.
///
/// Return the created phi node.
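///
/// The inserted pattern is roughly:
///   %lsr.iv = phi [ Start, %preheader ], [ %lsr.iv.next, %latch ]
///   %lsr.iv.next = add %lsr.iv, Step  ; emitted at IVIncInsertPt; a sub of
///                                     ; the negated step if Step is negative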
static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  const SCEV *IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}

static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first. To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together. By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't. The problem is that
  // const SCEV *'s don't have a deterministic ordering w.r.t to each other, so
  // we don't have anything to do a '<' comparison on. Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // ==.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    const SCEV *Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}

/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV *Stride,
                                const SCEV *CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter) {
  DOUT << "  Fully reducing all users\n";

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    const SCEV *Imm = UsersToProcess[i].Imm;
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
    do {
      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
      UsersToProcess[i].Phi = Phi;
      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
             "ShouldUseFullStrengthReductionMode should reject this!");
    } while (++i != e && Base == UsersToProcess[i].Base);
  }
}

/// FindIVIncInsertPt - Return the location to insert the increment instruction.
/// If the only use is a use of the postinc value (it must be the loop
/// termination condition), then insert it just before that use.
static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
                                      const Loop *L) {
  if (UsersToProcess.size() == 1 &&
      UsersToProcess[0].isUseOfPostIncrementedValue &&
      L->contains(UsersToProcess[0].Inst->getParent()))
    return UsersToProcess[0].Inst;
  return L->getLoopLatch()->getTerminator();
}

/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
/// given users to share.
void
LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                  std::vector<BasedUser> &UsersToProcess,
                                  const SCEV *Stride,
                                  const SCEV *CommonExprs,
                                  Value *CommonBaseV,
                                  Instruction *IVIncInsertPt,
                                  const Loop *L,
                                  SCEVExpander &PreheaderRewriter) {
  DOUT << "  Inserting new PHI:\n";

  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
                                 Stride, IVIncInsertPt, L,
                                 PreheaderRewriter);

  // Remember this in case a later stride is a multiple of this one.
  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);

  // All the users will share this new IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = Phi;

  DOUT << "    IV=";
  DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
  DOUT << "\n";
}

1441 /// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
1442 /// reuse an induction variable whose stride is a factor of the current
1443 /// induction variable's stride.
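///
/// A sketch: if an IV with stride 4 already exists and the current stride
/// is 8, the users here are rewritten in terms of that IV scaled by 2,
/// letting an addressing mode such as [base + iv*2] absorb the multiply.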
1445 void
1446 LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
1447 std::vector<BasedUser> &UsersToProcess,
1448 Value *CommonBaseV,
1449 const IVExpr &ReuseIV,
1450 Instruction *PreInsertPt) {
1451 DOUT << " Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
1452 << " and BASE " << *ReuseIV.Base << "\n";
1454 // All the users will share the reused IV.
1455 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1456 UsersToProcess[i].Phi = ReuseIV.PHI;
1458 Constant *C = dyn_cast<Constant>(CommonBaseV);
1459 if (C &&
1460 (!C->isNullValue() &&
1461 !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
1462 TLI, false)))
1463 // We want the common base emitted into the preheader! This is just
1464 // using cast as a copy so BitCast (no-op cast) is appropriate
1465 CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1466 "commonbase", PreInsertPt);
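/// IsImmFoldedIntoAddrMode - Return true if GV and/or Offset can be folded
/// into the addressing mode of every (non-postinc) use, i.e. it is safe to
/// sink that immediate part of the common expression back into the uses.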
1469 static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
1470 const Type *AccessTy,
1471 std::vector<BasedUser> &UsersToProcess,
1472 const TargetLowering *TLI) {
1473 SmallVector<Instruction*, 16> AddrModeInsts;
1474 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1475 if (UsersToProcess[i].isUseOfPostIncrementedValue)
1476 continue;
1477 ExtAddrMode AddrMode =
1478 AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
1479 AccessTy, UsersToProcess[i].Inst,
1480 AddrModeInsts, *TLI);
1481 if (GV && GV != AddrMode.BaseGV)
1482 return false;
1483 if (Offset && !AddrMode.BaseOffs)
1484 // FIXME: How to accurately check whether its immediate offset is folded.
1485 return false;
1486 AddrModeInsts.clear();
1488 return true;
1491 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
1492 /// IV stride. All of the users may have different starting values, and this
1493 /// may not be the only stride.
1494 void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
1495 IVUsersOfOneStride &Uses,
1496 Loop *L) {
1497 // If all the users are moved to another stride, then there is nothing to do.
1498 if (Uses.Users.empty())
1499 return;
1501 // Keep track of whether every use in UsersToProcess is an address. If so,
1502 // we may be able to rewrite the entire collection of them in terms of a
1503 // smaller-stride IV.
1504 bool AllUsesAreAddresses = true;
1506 // Keep track of whether every use of this stride is outside the loop. If so,
1507 // we want to be more aggressive about reusing a smaller-stride IV; a
1508 // multiply outside the loop is better than another IV inside. Well, usually.
1509 bool AllUsesAreOutsideLoop = true;
1511 // Transform our list of users and offsets to a bit more complex table. In
1512 // this new vector, each 'BasedUser' contains 'Base' the base of the
1513 // strided access as well as the old information from Uses. We progressively
1514 // move information from the Base field to the Imm field, until we eventually
1515 // have the full access expression to rewrite the use.
1516 std::vector<BasedUser> UsersToProcess;
1517 const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1518 AllUsesAreOutsideLoop,
1519 UsersToProcess);
1521 // Sort the UsersToProcess array so that users with common bases are
1522 // next to each other.
1523 SortUsersToProcess(UsersToProcess);
1525 // If we managed to find some expressions in common, we'll need to carry
1526 // their value in a register and add it in for each use. This will take up
1527 // a register operand, which potentially restricts what stride values are
1528 // valid.
1529 bool HaveCommonExprs = !CommonExprs->isZero();
1530 const Type *ReplacedTy = CommonExprs->getType();
1532 // If all uses are addresses, consider sinking the immediate part of the
1533 // common expression back into uses if they can fit in the immediate fields.
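// A sketch: if the common expression is "X + 4" and every use is an
// address with room left in its immediate field, the "+ 4" is sunk into
// each use's displacement, leaving only X live in the shared register.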
1534 if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
1535 const SCEV *NewCommon = CommonExprs;
1536 const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
1537 MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
1538 if (!Imm->isZero()) {
1539 bool DoSink = true;
1541 // If the immediate part of the common expression is a GV, check if it's
1542 // possible to fold it into the target addressing mode.
1543 GlobalValue *GV = 0;
1544 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
1545 GV = dyn_cast<GlobalValue>(SU->getValue());
1546 int64_t Offset = 0;
1547 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
1548 Offset = SC->getValue()->getSExtValue();
1549 if (GV || Offset)
1550 // Pass VoidTy as the AccessTy to be conservative, because
1551 // there could be multiple access types among all the uses.
1552 DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy,
1553 UsersToProcess, TLI);
1555 if (DoSink) {
1556 DOUT << " Sinking " << *Imm << " back down into uses\n";
1557 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1558 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
1559 CommonExprs = NewCommon;
1560 HaveCommonExprs = !CommonExprs->isZero();
1561 ++NumImmSunk;
1566 // Now that we know what we need to do, insert the PHI node itself.
1568 DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1569 << *Stride << ":\n"
1570 << " Common base: " << *CommonExprs << "\n";
1572 SCEVExpander Rewriter(*SE);
1573 SCEVExpander PreheaderRewriter(*SE);
1575 BasicBlock *Preheader = L->getLoopPreheader();
1576 Instruction *PreInsertPt = Preheader->getTerminator();
1577 BasicBlock *LatchBlock = L->getLoopLatch();
1578 Instruction *IVIncInsertPt = LatchBlock->getTerminator();
1580 LLVMContext &Context = Preheader->getContext();
1582 Value *CommonBaseV = Context.getNullValue(ReplacedTy);
1584 const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1585 IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
1586 SE->getIntegerSCEV(0, Type::Int32Ty),
1589 /// Choose a strength-reduction strategy and prepare for it by creating
1590 /// the necessary PHIs and adjusting the bookkeeping.
1591 if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1592 AllUsesAreAddresses, Stride)) {
1593 PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
1594 PreheaderRewriter);
1595 } else {
1596 // Emit the initial base value into the loop preheader.
1597 CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
1598 PreInsertPt);
1600 // If all uses are addresses, check if it is possible to reuse an IV. The
1601 // new IV must have a stride that is a multiple of the old stride; the
1602 // multiple must be a number that can be encoded in the scale field of the
1603 // target addressing mode; and we must have a valid instruction after this
1604 // substitution, including the immediate field, if any.
1605 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1606 AllUsesAreOutsideLoop,
1607 Stride, ReuseIV, ReplacedTy,
1608 UsersToProcess);
1609 if (!RewriteFactor->isZero())
1610 PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1611 ReuseIV, PreInsertPt);
1612 else {
1613 IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
1614 PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1615 CommonBaseV, IVIncInsertPt,
1616 L, PreheaderRewriter);
1620 // Process all the users now, replacing their strided uses with
1621 // strength-reduced forms. This outer loop handles all bases, the inner
1622 // loop handles all users of a particular base.
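// (Hypothetical walk-through: with sorted bases [B1, B1, B2], the base
// expression B2 is emitted once and its user rewritten, then both B1
// users are handled against a single emission of B1.)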
1623 while (!UsersToProcess.empty()) {
1624 const SCEV *Base = UsersToProcess.back().Base;
1625 Instruction *Inst = UsersToProcess.back().Inst;
1627 // Emit the code for Base into the preheader.
1628 Value *BaseV = 0;
1629 if (!Base->isZero()) {
1630 BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
1632 DOUT << " INSERTING code for BASE = " << *Base << ":";
1633 if (BaseV->hasName())
1634 DOUT << " Result value name = %" << BaseV->getNameStr();
1635 DOUT << "\n";
1637 // If BaseV is a non-zero constant, make sure that it gets inserted into
1638 // the preheader, instead of being forward substituted into the uses. We
1639 // do this by forcing a BitCast (noop cast) to be inserted into the
1640 // preheader in this case.
1641 if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
1642 !isa<Instruction>(BaseV)) {
1643 // We want this constant emitted into the preheader! This is just
1644 // using cast as a copy so BitCast (no-op cast) is appropriate
1645 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1646 PreInsertPt);
1650 // Emit the code to add the immediate offset to the Phi value, just before
1651 // the instructions that we identified as using this stride and base.
1652 do {
1653 // FIXME: Use emitted users to emit other users.
1654 BasedUser &User = UsersToProcess.back();
1656 DOUT << " Examining ";
1657 if (User.isUseOfPostIncrementedValue)
1658 DOUT << "postinc";
1659 else
1660 DOUT << "preinc";
1661 DOUT << " use ";
1662 DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
1663 /*PrintType=*/false));
1664 DOUT << " in Inst: " << *(User.Inst);
1666 // If this instruction wants to use the post-incremented value, move it
1667 // after the post-inc and use its value instead of the PHI.
1668 Value *RewriteOp = User.Phi;
1669 if (User.isUseOfPostIncrementedValue) {
1670 RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
1671 // If this user is in the loop, make sure it is the last thing in the
1672 // loop to ensure it is dominated by the increment. In case it's the
1673 // only use of the iv, the increment instruction is already before the
1674 // use.
1675 if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
1676 User.Inst->moveBefore(IVIncInsertPt);
1679 const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);
1681 if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
1682 SE->getEffectiveSCEVType(ReplacedTy)) {
1683 assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
1684 SE->getTypeSizeInBits(ReplacedTy) &&
1685 "Unexpected widening cast!");
1686 RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
1689 // If we had to insert new instructions for RewriteOp, we have to
1690 // consider that they may not have been able to end up immediately
1691 // next to RewriteOp, because non-PHI instructions may never precede
1692 // PHI instructions in a block. In this case, remember where the last
1693 // instruction was inserted so that if we're replacing a different
1694 // PHI node, we can use the later point to expand the final
1695 // RewriteExpr.
1696 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1697 if (RewriteOp == User.Phi) NewBasePt = 0;
1699 // Clear the SCEVExpander's expression map so that we are guaranteed
1700 // to have the code emitted where we expect it.
1701 Rewriter.clear();
1703 // If we are reusing the iv, then it must be multiplied by a constant
1704 // factor to take advantage of the addressing mode scale component.
1705 if (!RewriteFactor->isZero()) {
1706 // If we're reusing an IV with a nonzero base (currently this happens
1707 // only when all reuses are outside the loop) subtract that base here.
1708 // The base has been used to initialize the PHI node but we don't want
1709 // it here.
1710 if (!ReuseIV.Base->isZero()) {
1711 const SCEV *typedBase = ReuseIV.Base;
1712 if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
1713 SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
1714 // It's possible the original IV is a larger type than the new IV,
1715 // in which case we have to truncate the Base. We checked in
1716 // RequiresTypeConversion that this is valid.
1717 assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
1718 SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
1719 "Unexpected lengthening conversion!");
1720 typedBase = SE->getTruncateExpr(ReuseIV.Base,
1721 RewriteExpr->getType());
1723 RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
1726 // Multiply old variable, with base removed, by new scale factor.
1727 RewriteExpr = SE->getMulExpr(RewriteFactor,
1728 RewriteExpr);
1730 // The common base is emitted in the loop preheader. But since we
1731 // are reusing an IV, it has not been used to initialize the PHI node.
1732 // Add it to the expression used to rewrite the uses.
1733 // When this use is outside the loop, we earlier subtracted the
1734 // common base, and are adding it back here. Use the same expression
1735 // as before, rather than CommonBaseV, so DAGCombiner will zap it.
1736 if (!CommonExprs->isZero()) {
1737 if (L->contains(User.Inst->getParent()))
1738 RewriteExpr = SE->getAddExpr(RewriteExpr,
1739 SE->getUnknown(CommonBaseV));
1740 else
1741 RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
1745 // Now that we know what we need to do, insert code before User for the
1746 // immediate and any loop-variant expressions.
1747 if (BaseV)
1748 // Add BaseV to the PHI value if needed.
1749 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
1751 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1752 Rewriter, L, this, *LI,
1753 DeadInsts);
1755 // Mark old value we replaced as possibly dead, so that it is eliminated
1756 // if we just replaced the last use of that value.
1757 DeadInsts.push_back(User.OperandValToReplace);
1759 UsersToProcess.pop_back();
1760 ++NumReduced;
1762 // If there are any more users to process with the same base, process them
1763 // now. We sorted by base above, so we just have to check the last elt.
1764 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1765 // TODO: Next, find out which base index is the most common, pull it out.
1768 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1769 // different starting values, into different PHIs.
1772 /// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1773 /// set the IV user and stride information and return true, otherwise return
1774 /// false.
1775 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
1776 const SCEV *const * &CondStride) {
1777 for (unsigned Stride = 0, e = IU->StrideOrder.size();
1778 Stride != e && !CondUse; ++Stride) {
1779 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1780 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1781 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1783 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1784 E = SI->second->Users.end(); UI != E; ++UI)
1785 if (UI->getUser() == Cond) {
1786 // NOTE: we could handle setcc instructions with multiple uses here, but
1787 // InstCombine does this as well for simple uses, and it's not clear that
1788 // it occurs often enough in real life to be worth handling.
1789 CondUse = UI;
1790 CondStride = &SI->first;
1791 return true;
1794 return false;
1797 namespace {
1798 // Constant strides come first and are sorted by their absolute values. If
1799 // the absolute values are the same, then the positive stride comes first.
1800 // e.g.
1801 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
1802 struct StrideCompare {
1803 const ScalarEvolution *SE;
1804 explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
1806 bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) {
1807 const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
1808 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
1809 if (LHSC && RHSC) {
1810 int64_t LV = LHSC->getValue()->getSExtValue();
1811 int64_t RV = RHSC->getValue()->getSExtValue();
1812 uint64_t ALV = (LV < 0) ? -LV : LV;
1813 uint64_t ARV = (RV < 0) ? -RV : RV;
1814 if (ALV == ARV) {
1815 if (LV != RV)
1816 return LV > RV;
1817 } else {
1818 return ALV < ARV;
1821 // If it's the same value but different type, sort by bit width so
1822 // that we emit larger induction variables before smaller
1823 // ones, letting the smaller be re-written in terms of larger ones.
1824 return SE->getTypeSizeInBits(RHS->getType()) <
1825 SE->getTypeSizeInBits(LHS->getType());
1827 return LHSC && !RHSC;
1832 /// ChangeCompareStride - If a loop termination compare instruction is the
1833 /// only use of its stride, and the comparison is against a constant value,
1834 /// try to eliminate the stride by moving the compare instruction to another
1835 /// stride and changing its constant operand accordingly. e.g.
1837 /// loop:
1838 /// ...
1839 /// v1 = v1 + 3
1840 /// v2 = v2 + 1
1841 /// if (v2 < 10) goto loop
1842 /// =>
1843 /// loop:
1844 /// ...
1845 /// v1 = v1 + 3
1846 /// if (v1 < 30) goto loop
1847 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
1848 IVStrideUse* &CondUse,
1849 const SCEV *const* &CondStride) {
1850 // If there's only one stride in the loop, there's nothing to do here.
1851 if (IU->StrideOrder.size() < 2)
1852 return Cond;
1853 // If there are other users of the condition's stride, don't bother
1854 // trying to change the condition because the stride will still
1855 // remain.
1856 std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
1857 IU->IVUsesByStride.find(*CondStride);
1858 if (I == IU->IVUsesByStride.end() ||
1859 I->second->Users.size() != 1)
1860 return Cond;
1861 // Only handle constant strides for now.
1862 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
1863 if (!SC) return Cond;
1865 LLVMContext &Context = Cond->getContext();
1867 ICmpInst::Predicate Predicate = Cond->getPredicate();
1868 int64_t CmpSSInt = SC->getValue()->getSExtValue();
1869 unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
1870 uint64_t SignBit = 1ULL << (BitWidth-1);
1871 const Type *CmpTy = Cond->getOperand(0)->getType();
1872 const Type *NewCmpTy = NULL;
1873 unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
1874 unsigned NewTyBits = 0;
1875 const SCEV **NewStride = NULL;
1876 Value *NewCmpLHS = NULL;
1877 Value *NewCmpRHS = NULL;
1878 int64_t Scale = 1;
1879 const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);
1881 if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
1882 int64_t CmpVal = C->getValue().getSExtValue();
1884 // Check the signs of the stride constant and the comparison constant
1885 // to detect overflow.
1886 if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
1887 return Cond;
1889 // Look for a suitable stride / iv as replacement.
1890 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
1891 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1892 IU->IVUsesByStride.find(IU->StrideOrder[i]);
1893 if (!isa<SCEVConstant>(SI->first))
1894 continue;
1895 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1896 if (SSInt == CmpSSInt ||
1897 abs64(SSInt) < abs64(CmpSSInt) ||
1898 (SSInt % CmpSSInt) != 0)
1899 continue;
1901 Scale = SSInt / CmpSSInt;
1902 int64_t NewCmpVal = CmpVal * Scale;
1903 APInt Mul = APInt(BitWidth*2, CmpVal, true);
1904 Mul = Mul * APInt(BitWidth*2, Scale, true);
1905 // Check for overflow.
1906 if (!Mul.isSignedIntN(BitWidth))
1907 continue;
1908 // Check for overflow in the stride's type too.
1909 if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
1910 continue;
1912 // Watch out for overflow.
1913 if (ICmpInst::isSignedPredicate(Predicate) &&
1914 (CmpVal & SignBit) != (NewCmpVal & SignBit))
1915 continue;
1917 if (NewCmpVal == CmpVal)
1918 continue;
1919 // Pick the best iv to use trying to avoid a cast.
1920 NewCmpLHS = NULL;
1921 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1922 E = SI->second->Users.end(); UI != E; ++UI) {
1923 Value *Op = UI->getOperandValToReplace();
1925 // If the IVStrideUse implies a cast, check for an actual cast which
1926 // can be used to find the original IV expression.
1927 if (SE->getEffectiveSCEVType(Op->getType()) !=
1928 SE->getEffectiveSCEVType(SI->first->getType())) {
1929 CastInst *CI = dyn_cast<CastInst>(Op);
1930 // If it's not a simple cast, it's complicated.
1931 if (!CI)
1932 continue;
1933 // If it's a cast from a type other than the stride type,
1934 // it's complicated.
1935 if (CI->getOperand(0)->getType() != SI->first->getType())
1936 continue;
1937 // Ok, we found the IV expression in the stride's type.
1938 Op = CI->getOperand(0);
1941 NewCmpLHS = Op;
1942 if (NewCmpLHS->getType() == CmpTy)
1943 break;
1945 if (!NewCmpLHS)
1946 continue;
1948 NewCmpTy = NewCmpLHS->getType();
1949 NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
1950 const Type *NewCmpIntTy = Context.getIntegerType(NewTyBits);
1951 if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
1952 // Check if it is possible to rewrite it using
1953 // an iv / stride of a smaller integer type.
1954 unsigned Bits = NewTyBits;
1955 if (ICmpInst::isSignedPredicate(Predicate))
1956 --Bits;
1957 uint64_t Mask = (1ULL << Bits) - 1;
1958 if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
1959 continue;
1962 // Don't rewrite if the use offset is non-constant and the new type
1963 // differs from the old one.
1964 // FIXME: too conservative?
1965 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
1966 continue;
1968 bool AllUsesAreAddresses = true;
1969 bool AllUsesAreOutsideLoop = true;
1970 std::vector<BasedUser> UsersToProcess;
1971 const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
1972 AllUsesAreAddresses,
1973 AllUsesAreOutsideLoop,
1974 UsersToProcess);
1975 // Avoid rewriting the compare instruction with an iv of new stride
1976 // if it's likely the new stride uses will be rewritten using the
1977 // stride of the compare instruction.
1978 if (AllUsesAreAddresses &&
1979 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
1980 continue;
1982 // Avoid rewriting the compare instruction with an iv which has
1983 // implicit extension or truncation built into it.
1984 // TODO: This is over-conservative.
1985 if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
1986 continue;
1988 // If scale is negative, use swapped predicate unless it's testing
1989 // for equality.
1990 if (Scale < 0 && !Cond->isEquality())
1991 Predicate = ICmpInst::getSwappedPredicate(Predicate);
1993 NewStride = &IU->StrideOrder[i];
1994 if (!isa<PointerType>(NewCmpTy))
1995 NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
1996 else {
1997 Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
1998 NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
2000 NewOffset = TyBits == NewTyBits
2001 ? SE->getMulExpr(CondUse->getOffset(),
2002 SE->getConstant(CmpTy, Scale))
2003 : SE->getConstant(NewCmpIntTy,
2004 cast<SCEVConstant>(CondUse->getOffset())->getValue()
2005 ->getSExtValue()*Scale);
2006 break;
2010 // Forgo this transformation if the increment happens to be
2011 // unfortunately positioned after the condition, and the condition
2012 // has multiple uses which prevent it from being moved immediately
2013 // before the branch. See
2014 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2015 // for an example of this situation.
2016 if (!Cond->hasOneUse()) {
2017 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2018 I != E; ++I)
2019 if (I == NewCmpLHS)
2020 return Cond;
2023 if (NewCmpRHS) {
2024 // Create a new compare instruction using new stride / iv.
2025 ICmpInst *OldCond = Cond;
2026 // Insert new compare instruction.
2027 Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
2028 L->getHeader()->getName() + ".termcond");
2030 // Remove the old compare instruction. The old indvar is probably dead too.
2031 DeadInsts.push_back(CondUse->getOperandValToReplace());
2032 OldCond->replaceAllUsesWith(Cond);
2033 OldCond->eraseFromParent();
2035 IU->IVUsesByStride[*NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
2036 CondUse = &IU->IVUsesByStride[*NewStride]->Users.back();
2037 CondStride = NewStride;
2038 ++NumEliminated;
2039 Changed = true;
2042 return Cond;
2045 /// OptimizeMax - Rewrite the loop's terminating condition if it uses
2046 /// a max computation.
2048 /// This is a narrow solution to a specific, but acute, problem. For loops
2049 /// like this:
2051 /// i = 0;
2052 /// do {
2053 /// p[i] = 0.0;
2054 /// } while (++i < n);
2056 /// the trip count isn't just 'n', because 'n' might not be positive. And
2057 /// unfortunately this can come up even for loops where the user didn't use
2058 /// a C do-while loop. For example, seemingly well-behaved top-test loops
2059 /// will commonly be lowered like this:
2061 /// if (n > 0) {
2062 /// i = 0;
2063 /// do {
2064 /// p[i] = 0.0;
2065 /// } while (++i < n);
2066 /// }
2068 /// and then it's possible for subsequent optimization to obscure the if
2069 /// test in such a way that indvars can't find it.
2071 /// When indvars can't find the if test in loops like this, it creates a
2072 /// max expression, which allows it to give the loop a canonical
2073 /// induction variable:
2075 /// i = 0;
2076 /// max = n < 1 ? 1 : n;
2077 /// do {
2078 /// p[i] = 0.0;
2079 /// } while (++i != max);
2081 /// Canonical induction variables are necessary because the loop passes
2082 /// are designed around them. The most obvious example of this is the
2083 /// LoopInfo analysis, which doesn't remember trip count values. It
2084 /// expects to be able to rediscover the trip count each time it is
2085 /// needed, and it does this using a simple analysis that only succeeds if
2086 /// the loop has a canonical induction variable.
2088 /// However, when it comes time to generate code, the maximum operation
2089 /// can be quite costly, especially if it's inside of an outer loop.
2091 /// This function solves this problem by detecting such loops and
2092 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2093 /// the instructions for the maximum computation.
2095 ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
2096 IVStrideUse* &CondUse) {
2097 // Check that the loop matches the pattern we're looking for.
2098 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2099 Cond->getPredicate() != CmpInst::ICMP_NE)
2100 return Cond;
2102 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2103 if (!Sel || !Sel->hasOneUse()) return Cond;
2105 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2106 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2107 return Cond;
2108 const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2110 // Add one to the backedge-taken count to get the trip count.
2111 const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
2113 // Check for a max calculation that matches the pattern.
2114 if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
2115 return Cond;
2116 const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
2117 if (Max != SE->getSCEV(Sel)) return Cond;
2119 // To handle a max with more than two operands, this optimization would
2120 // require additional checking and setup.
2121 if (Max->getNumOperands() != 2)
2122 return Cond;
2124 const SCEV *MaxLHS = Max->getOperand(0);
2125 const SCEV *MaxRHS = Max->getOperand(1);
2126 if (!MaxLHS || MaxLHS != One) return Cond;
2128 // Check the relevant induction variable for conformance to
2129 // the pattern.
2130 const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2131 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2132 if (!AR || !AR->isAffine() ||
2133 AR->getStart() != One ||
2134 AR->getStepRecurrence(*SE) != One)
2135 return Cond;
2137 assert(AR->getLoop() == L &&
2138 "Loop condition operand is an addrec in a different loop!");
2140 // Check the right operand of the select, and remember it, as it will
2141 // be used in the new comparison instruction.
2142 Value *NewRHS = 0;
2143 if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
2144 NewRHS = Sel->getOperand(1);
2145 else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
2146 NewRHS = Sel->getOperand(2);
2147 if (!NewRHS) return Cond;
2149 // Determine the new comparison opcode. It may be signed or unsigned,
2150 // and the original comparison may be either equality or inequality.
2151 CmpInst::Predicate Pred =
2152 isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
2153 if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2154 Pred = CmpInst::getInversePredicate(Pred);
2156 // Ok, everything looks ok to change the condition into an SLT or SGE and
2157 // delete the max calculation.
2158 ICmpInst *NewCond =
2159 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2161 // Delete the max calculation instructions.
2162 Cond->replaceAllUsesWith(NewCond);
2163 CondUse->setUser(NewCond);
2164 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2165 Cond->eraseFromParent();
2166 Sel->eraseFromParent();
2167 if (Cmp->use_empty())
2168 Cmp->eraseFromParent();
2169 return NewCond;
2172 /// OptimizeShadowIV - If an IV is used in an int-to-float cast
2173 /// inside the loop then try to eliminate the cast operation.
2174 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2176 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2177 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2178 return;
2180 for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
2181 ++Stride) {
2182 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2183 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2184 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2185 if (!isa<SCEVConstant>(SI->first))
2186 continue;
2188 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
2189 E = SI->second->Users.end(); UI != E; /* empty */) {
2190 ilist<IVStrideUse>::iterator CandidateUI = UI;
2191 ++UI;
2192 Instruction *ShadowUse = CandidateUI->getUser();
2193 const Type *DestTy = NULL;
2195 /* If the shadow use is an int->float cast then insert a second IV
2196 to eliminate this cast.
2198 for (unsigned i = 0; i < n; ++i)
2199 foo((double)i);
2201 is transformed into
2203 double d = 0.0;
2204 for (unsigned i = 0; i < n; ++i, ++d)
2205 foo(d);
2207 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
2208 DestTy = UCast->getDestTy();
2209 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
2210 DestTy = SCast->getDestTy();
2211 if (!DestTy) continue;
2213 if (TLI) {
2214 // If target does not support DestTy natively then do not apply
2215 // this transformation.
2216 MVT DVT = TLI->getValueType(DestTy);
2217 if (!TLI->isTypeLegal(DVT)) continue;
2220 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2221 if (!PH) continue;
2222 if (PH->getNumIncomingValues() != 2) continue;
2224 const Type *SrcTy = PH->getType();
2225 int Mantissa = DestTy->getFPMantissaWidth();
2226 if (Mantissa == -1) continue;
2227 if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
2228 continue;
2230 unsigned Entry, Latch;
2231 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2232 Entry = 0;
2233 Latch = 1;
2234 } else {
2235 Entry = 1;
2236 Latch = 0;
2239 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2240 if (!Init) continue;
2241 Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2243 BinaryOperator *Incr =
2244 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2245 if (!Incr) continue;
2246 if (Incr->getOpcode() != Instruction::Add
2247 && Incr->getOpcode() != Instruction::Sub)
2248 continue;
2250 /* Initialize new IV, double d = 0.0 in the above example. */
2251 ConstantInt *C = NULL;
2252 if (Incr->getOperand(0) == PH)
2253 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2254 else if (Incr->getOperand(1) == PH)
2255 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2256 else
2257 continue;
2259 if (!C) continue;
2261 /* Add new PHINode. */
2262 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2264 /* Create new increment, '++d' in the above example. */
2265 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2266 BinaryOperator *NewIncr =
2267 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2268 Instruction::FAdd : Instruction::FSub,
2269 NewPH, CFP, "IV.S.next.", Incr);
2271 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2272 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2274 /* Remove cast operation */
2275 ShadowUse->replaceAllUsesWith(NewPH);
2276 ShadowUse->eraseFromParent();
2277 NumShadow++;
2278 break;
2283 /// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2284 /// uses in the loop, look to see if we can eliminate some, in favor of using
2285 /// common indvars for the different uses.
2286 void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2287 // TODO: implement optzns here.
2289 OptimizeShadowIV(L);
2292 /// OptimizeLoopTermCond - Change loop terminating condition to use the
2293 /// postinc iv when possible.
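///
/// A sketch of the rewrite this enables (hypothetical names):
///
///   i.next = i + 1                    i.next = i + 1
///   if (i != n-1) goto loop    =>     if (i.next != n) goto loop
///
/// which lets the live range of i end at the increment.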
2294 void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
2295 // Finally, get the terminating condition for the loop if possible. If we
2296 // can, we want to change it to use a post-incremented version of its
2297 // induction variable, to allow coalescing the live ranges for the IV into
2298 // one register value.
2299 BasicBlock *LatchBlock = L->getLoopLatch();
2300 BasicBlock *ExitingBlock = L->getExitingBlock();
2301 LLVMContext &Context = LatchBlock->getContext();
2303 if (!ExitingBlock)
2304 // Multiple exits, just look at the exit in the latch block if there is one.
2305 ExitingBlock = LatchBlock;
2306 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2307 if (!TermBr)
2308 return;
2309 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2310 return;
2312 // Search IVUsesByStride to find Cond's IVUse if there is one.
2313 IVStrideUse *CondUse = 0;
2314 const SCEV *const *CondStride = 0;
2315 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2316 if (!FindIVUserForCond(Cond, CondUse, CondStride))
2317 return; // setcc doesn't use the IV.
2319 if (ExitingBlock != LatchBlock) {
2320 if (!Cond->hasOneUse())
2321 // See below, we don't want the condition to be cloned.
2322 return;
2324 // If exiting block is the latch block, we know it's safe and profitable to
2325 // transform the icmp to use post-inc iv. Otherwise do so only if it would
2326 // not reuse another iv and its iv would be reused by other uses. We are
2327 // optimizing for the case where the icmp is the only use of the iv.
2328 IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[*CondStride];
2329 for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2330 E = StrideUses.Users.end(); I != E; ++I) {
2331 if (I->getUser() == Cond)
2332 continue;
2333 if (!I->isUseOfPostIncrementedValue())
2334 return;
2337 // FIXME: This is expensive, and worse still ChangeCompareStride does a
2338 // similar check. Can we perform all the icmp related transformations after
2339 // StrengthReduceStridedIVUsers?
2340 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) {
2341 int64_t SInt = SC->getValue()->getSExtValue();
2342 for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
2343 ++NewStride) {
2344 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2345 IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
2346 if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
2347 continue;
2348 int64_t SSInt =
2349 cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
2350 if (SSInt == SInt)
2351 return; // This can definitely be reused.
2352 if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
2353 continue;
2354 int64_t Scale = SSInt / SInt;
2355 bool AllUsesAreAddresses = true;
2356 bool AllUsesAreOutsideLoop = true;
2357 std::vector<BasedUser> UsersToProcess;
2358 const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
2359 AllUsesAreAddresses,
2360 AllUsesAreOutsideLoop,
2361 UsersToProcess);
2362 // Avoid rewriting the compare instruction with an iv of new stride
2363 // if it's likely the new stride uses will be rewritten using the
2364 // stride of the compare instruction.
2365 if (AllUsesAreAddresses &&
2366 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
2367 return;
2371 StrideNoReuse.insert(*CondStride);
2374 // If the trip count is computed in terms of a max (due to ScalarEvolution
2375 // being unable to find a sufficient guard, for example), change the loop
2376 // comparison to use SLT or ULT instead of NE.
2377 Cond = OptimizeMax(L, Cond, CondUse);
2379 // If possible, change stride and operands of the compare instruction to
2380 // eliminate one stride.
2381 if (ExitingBlock == LatchBlock)
2382 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2384 // It's possible for the setcc instruction to be anywhere in the loop, and
2385 // possible for it to have multiple users. If it is not immediately before
2386 // the latch block branch, move it.
2387 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2388 if (Cond->hasOneUse()) { // Condition has a single use, just move it.
2389 Cond->moveBefore(TermBr);
2390 } else {
2391 // Otherwise, clone the terminating condition and insert it into the loop end.
2392 Cond = cast<ICmpInst>(Cond->clone(Context));
2393 Cond->setName(L->getHeader()->getName() + ".termcond");
2394 LatchBlock->getInstList().insert(TermBr, Cond);
2396 // Clone the IVUse, as the old use still exists!
2397 IU->IVUsesByStride[*CondStride]->addUser(CondUse->getOffset(), Cond,
2398 CondUse->getOperandValToReplace());
2399 CondUse = &IU->IVUsesByStride[*CondStride]->Users.back();
2403 // If we get to here, we know that we can transform the setcc instruction to
2404 // use the post-incremented version of the IV, allowing us to coalesce the
2405 // live ranges for the IV correctly.
2406 CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), *CondStride));
2407 CondUse->setIsUseOfPostIncrementedValue(true);
2408 Changed = true;
2410 ++NumLoopCond;
2413 /// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
2414 /// when to exit the loop is used only for that purpose, try to rearrange things
2415 /// so it counts down to a test against zero.
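///
/// A sketch: "for (i = start; i != end; ++i)" becomes
/// "for (i = end - start; i != 0; --i)", so the exit test is a compare
/// against zero, which is typically cheaper to generate.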
2416 void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
2418 // If the number of times the loop is executed isn't computable, give up.
2419 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2420 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2421 return;
2423 // Get the terminating condition for the loop if possible (this isn't
2424 // necessarily in the latch, or a block that's a predecessor of the header).
2425 if (!L->getExitBlock())
2426 return; // More than one loop exit block.
2428 // Okay, there is one exit block. Try to find the condition that causes the
2429 // loop to be exited.
2430 BasicBlock *ExitingBlock = L->getExitingBlock();
2431 if (!ExitingBlock)
2432 return; // More than one block exiting!
2434 // Okay, we've computed the exiting block. See what condition causes us to
2435 // exit.
2437 // FIXME: we should be able to handle switch instructions (with a single exit)
2438 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2439 if (TermBr == 0) return;
2440 assert(TermBr->isConditional() && "If unconditional, it can't be in loop!");
2441 if (!isa<ICmpInst>(TermBr->getCondition()))
2442 return;
2443 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2445 // Handle only tests for equality for the moment, and only stride 1.
2446 if (Cond->getPredicate() != CmpInst::ICMP_EQ)
2447 return;
2448 const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2449 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2450 const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2451 if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
2452 return;
2453 // If the RHS of the comparison is defined inside the loop, the rewrite
2454 // cannot be done.
2455 if (Instruction *CR = dyn_cast<Instruction>(Cond->getOperand(1)))
2456 if (L->contains(CR->getParent()))
2457 return;
2459 // Make sure the IV is only used for counting. Value may be preinc or
2460 // postinc; 2 uses in either case.
2461 if (!Cond->getOperand(0)->hasNUses(2))
2462 return;
2463 PHINode *phi = dyn_cast<PHINode>(Cond->getOperand(0));
2464 Instruction *incr;
2465 if (phi && phi->getParent()==L->getHeader()) {
2466 // Value tested is preinc. Find the increment.
2467 // A CmpInst is not a BinaryOperator; we depend on this.
2468 Instruction::use_iterator UI = phi->use_begin();
2469 incr = dyn_cast<BinaryOperator>(UI);
2470 if (!incr)
2471 incr = dyn_cast<BinaryOperator>(++UI);
2472 // 1 use for postinc value, the phi. Unnecessarily conservative?
2473 if (!incr || !incr->hasOneUse() || incr->getOpcode()!=Instruction::Add)
2474 return;
2475 } else {
2476 // Value tested is postinc. Find the phi node.
2477 incr = dyn_cast<BinaryOperator>(Cond->getOperand(0));
2478 if (!incr || incr->getOpcode()!=Instruction::Add)
2479 return;
2481 Instruction::use_iterator UI = Cond->getOperand(0)->use_begin();
2482 phi = dyn_cast<PHINode>(UI);
2483 if (!phi)
2484 phi = dyn_cast<PHINode>(++UI);
2485 // 1 use for preinc value, the increment.
2486 if (!phi || phi->getParent()!=L->getHeader() || !phi->hasOneUse())
2487 return;
2490 // Replace the increment with a decrement.
2491 BinaryOperator *decr =
2492 BinaryOperator::Create(Instruction::Sub, incr->getOperand(0),
2493 incr->getOperand(1), "tmp", incr);
2494 incr->replaceAllUsesWith(decr);
2495 incr->eraseFromParent();
2497 // Substitute endval-startval for the original startval, and 0 for the
2498 // original endval. Since we're only testing for equality this is OK even
2499 // if the computation wraps around.
2500 BasicBlock *Preheader = L->getLoopPreheader();
2501 Instruction *PreInsertPt = Preheader->getTerminator();
2502 int inBlock = L->contains(phi->getIncomingBlock(0)) ? 1 : 0;
2503 Value *startVal = phi->getIncomingValue(inBlock);
2504 Value *endVal = Cond->getOperand(1);
2505 // FIXME: check for the case where both are constant.
2506 Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
2507 BinaryOperator *NewStartVal =
2508 BinaryOperator::Create(Instruction::Sub, endVal, startVal,
2509 "tmp", PreInsertPt);
2510 phi->setIncomingValue(inBlock, NewStartVal);
2511 Cond->setOperand(1, Zero);
2513 Changed = true;
2516 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2518 IU = &getAnalysis<IVUsers>();
2519 LI = &getAnalysis<LoopInfo>();
2520 DT = &getAnalysis<DominatorTree>();
2521 SE = &getAnalysis<ScalarEvolution>();
2522 Changed = false;
2524 if (!IU->IVUsesByStride.empty()) {
2525 DEBUG(errs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
2526 << "\" ";
2527 L->dump());
2529 // Sort the StrideOrder so we process larger strides first.
2530 std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
2531 StrideCompare(SE));
2533 // Optimize induction variables. Some indvar uses can be transformed to use
2534 // strides that will be needed for other purposes. A common example of this
2535 // is the exit test for the loop, which can often be rewritten to use the
2536 // computation of some other indvar to decide when to terminate the loop.
2537 OptimizeIndvars(L);
2539 // Change loop terminating condition to use the postinc iv when possible
2540 // and optimize loop terminating compare. FIXME: Move this after
2541 // StrengthReduceStridedIVUsers?
2542 OptimizeLoopTermCond(L);
2544 // FIXME: We can shrink overlarge IV's here. e.g. if the code has
2545 // computation in i64 values and the target doesn't support i64, demote
2546 // the computation to 32-bit if safe.
2548 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
2549 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2550 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2551 // Need to be careful that IV's are all the same type. Only works for
2552 // intptr_t indvars.
2554 // IVsByStride keeps IVs for one particular loop.
2555 assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2557 // Note: this processes each stride/type pair individually. All users
2558 // passed into StrengthReduceStridedIVUsers have the same type AND stride.
2559 // Also, note that we iterate over IVUsesByStride indirectly by using
2560 // StrideOrder. This extra layer of indirection makes the ordering of
2561 // strides deterministic - not dependent on map order.
2562 for (unsigned Stride = 0, e = IU->StrideOrder.size();
2563 Stride != e; ++Stride) {
2564 std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2565 IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2566 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2567 // FIXME: Generalize to non-affine IV's.
2568 if (!SI->first->isLoopInvariant(L))
2569 continue;
2570 StrengthReduceStridedIVUsers(SI->first, *SI->second, L);
2574 // After all sharing is done, see if we can adjust the loop to test against
2575 // zero instead of counting up to a maximum. This is usually faster.
2576 OptimizeLoopCountIV(L);
2578 // We're done analyzing this loop; release all the state we built up for it.
2579 IVsByStride.clear();
2580 StrideNoReuse.clear();
2582 // Clean up after ourselves
2583 if (!DeadInsts.empty())
2584 DeleteTriviallyDeadInstructions();
2586 // At this point, it is worth checking to see if any recurrence PHIs are also
2587 // dead, so that we can remove them as well.
2588 DeleteDeadPHIs(L->getHeader());
2590 return Changed;