//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
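//
// As an illustrative sketch (not code from this pass): a loop that computes
// an address as "A + i*4" on every iteration,
//
//   for (i = 0; i < n; ++i)
//     A[i] = 0;
//
// can instead keep the address itself in an induction variable, replacing
// the per-iteration multiply with a simple pointer increment:
//
//   for (p = A; p != A + n*4; p += 4)
//     *p = 0;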
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);
namespace {

/// IVStrideUse - Keep track of one use of a strided induction variable, where
/// the stride is stored externally.  The Offset member keeps track of the
/// offset from the IV, User is the actual user of the operand, and
/// 'OperandValToReplace' is the operand of the User that is the use.
struct VISIBILITY_HIDDEN IVStrideUse {
  SCEVHandle   Offset;
  Instruction *User;
  Value       *OperandValToReplace;

  // isUseOfPostIncrementedValue - True if this should use the
  // post-incremented version of this IV, not the preincremented version.
  // This can only be set in special cases, such as the terminating setcc
  // instruction for a loop or uses dominated by the loop.
  bool isUseOfPostIncrementedValue;

  IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
    : Offset(Offs), User(U), OperandValToReplace(O),
      isUseOfPostIncrementedValue(false) {}
};
/// IVUsersOfOneStride - This structure keeps track of all instructions that
/// have an operand that is based on the trip count multiplied by some stride.
/// The stride for all of these users is common and kept external to this
/// structure.
struct VISIBILITY_HIDDEN IVUsersOfOneStride {
  /// Users - Keep track of all of the users of this stride as well as the
  /// initial value and the operand that uses the IV.
  std::vector<IVStrideUse> Users;

  void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
    Users.push_back(IVStrideUse(Offset, User, Operand));
  }
};
/// IVInfo - This structure keeps track of one IV expression inserted during
/// StrengthReduceStridedIVUsers.  It contains the stride, the common base, as
/// well as the PHI node and increment value created for rewrite.
struct VISIBILITY_HIDDEN IVExpr {
  SCEVHandle  Stride;
  SCEVHandle  Base;
  PHINode    *PHI;

  IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi)
    : Stride(stride), Base(base), PHI(phi) {}
};
/// IVsOfOneStride - This structure keeps track of all IV expressions inserted
/// during StrengthReduceStridedIVUsers for a particular stride of the IV.
struct VISIBILITY_HIDDEN IVsOfOneStride {
  std::vector<IVExpr> IVs;

  void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI) {
    IVs.push_back(IVExpr(Stride, Base, PHI));
  }
};
class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  bool Changed;

  /// IVUsesByStride - Keep track of all uses of induction variables that we
  /// are interested in.  The key of the map is the stride of the access.
  std::map<SCEVHandle, IVUsersOfOneStride> IVUsesByStride;

  /// IVsByStride - Keep track of all IVs that have been inserted for a
  /// particular stride.
  std::map<SCEVHandle, IVsOfOneStride> IVsByStride;

  /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
  /// We use this to iterate over the IVUsesByStride collection without being
  /// dependent on random ordering of pointers in the process.
  SmallVector<SCEVHandle, 16> StrideOrder;

  /// DeadInsts - Keep track of instructions we may have made dead, so that
  /// we can remove them after we are done working.
  SmallVector<Instruction*, 16> DeadInsts;

  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
    LoopPass(&ID), TLI(tli) {
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    // We split critical edges, so we change the CFG.  However, we do update
    // many analyses if they are around.
    AU.addPreservedID(LoopSimplifyID);
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominanceFrontier>();
    AU.addPreserved<DominatorTree>();

    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addRequired<ScalarEvolution>();
    AU.addPreserved<ScalarEvolution>();
  }
private:
  bool AddUsersIfInteresting(Instruction *I, Loop *L,
                             SmallPtrSet<Instruction*,16> &Processed);
  ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                IVStrideUse* &CondUse,
                                const SCEVHandle* &CondStride);
  void OptimizeIndvars(Loop *L);

  /// OptimizeShadowIV - If IV is used in an int-to-float cast
  /// inside the loop then try to eliminate the cast operation.
  void OptimizeShadowIV(Loop *L);

  /// OptimizeSMax - Rewrite the loop's terminating condition
  /// if it uses an smax computation.
  ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
                         IVStrideUse* &CondUse);

  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                         const SCEVHandle *&CondStride);
  bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
  SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
                             IVExpr&, const Type*,
                             const std::vector<BasedUser>& UsersToProcess);
  bool ValidStride(bool, int64_t,
                   const std::vector<BasedUser>& UsersToProcess);
  SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
                            IVUsersOfOneStride &Uses,
                            Loop *L,
                            bool &AllUsesAreAddresses,
                            bool &AllUsesAreOutsideLoop,
                            std::vector<BasedUser> &UsersToProcess);
  bool ShouldUseFullStrengthReductionMode(
                            const std::vector<BasedUser> &UsersToProcess,
                            const Loop *L,
                            bool AllUsesAreAddresses,
                            SCEVHandle Stride);
  void PrepareToStrengthReduceFully(
                            std::vector<BasedUser> &UsersToProcess,
                            SCEVHandle Stride,
                            SCEVHandle CommonExprs,
                            const Loop *L,
                            SCEVExpander &PreheaderRewriter);
  void PrepareToStrengthReduceFromSmallerStride(
                            std::vector<BasedUser> &UsersToProcess,
                            Value *CommonBaseV,
                            const IVExpr &ReuseIV,
                            Instruction *PreInsertPt);
  void PrepareToStrengthReduceWithNewPhi(
                            std::vector<BasedUser> &UsersToProcess,
                            SCEVHandle Stride,
                            SCEVHandle CommonExprs,
                            Value *CommonBaseV,
                            const Loop *L,
                            SCEVExpander &PreheaderRewriter);
  void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                    IVUsersOfOneStride &Uses,
                                    Loop *L);
  void DeleteTriviallyDeadInstructions();
};

} // end anonymous namespace
char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  // Sort the deadinsts list so that we can trivially eliminate duplicates as we
  // go.  The code below never adds a non-dead instruction to the worklist, but
  // callers may not be so careful.
  array_pod_sort(DeadInsts.begin(), DeadInsts.end());

  // Drop duplicate instructions and those with uses.
  for (unsigned i = 0, e = DeadInsts.size()-1; i < e; ++i) {
    Instruction *I = DeadInsts[i];
    if (!I->use_empty()) DeadInsts[i] = 0;
    while (i != e && DeadInsts[i+1] == I)
      DeadInsts[++i] = 0;
  }

  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.back();
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}
/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L.  An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) {
  // This is very common, put it first.
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned int i = 0; i < AE->getNumOperands(); i++)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // if newLoop is an outer loop of L, this is OK.
      if (!LoopInfoBase<BasicBlock>::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}
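// An illustrative example of the rule above: given an outer loop L1 containing
// an inner loop L2, an expression such as {0,+,1}<L1> is acceptable when L is
// L2 (L1 is an outer loop of L), but {0,+,1}<L2> must be rejected when L is
// L1, since SCEV cannot reason about L and an unrelated inner AddRec at the
// same time.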
/// getSCEVStartAndStride - Compute the start and stride of this expression,
/// returning false if the expression is not a start/stride pair, or true if it
/// is.  The stride must be a loop invariant expression, but the start may be
/// a mix of loop invariant and loop variant expressions.  The start cannot,
/// however, contain an AddRec from a different loop, unless that loop is an
/// outer loop of the current loop.
static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
                                  SCEVHandle &Start, SCEVHandle &Stride,
                                  ScalarEvolution *SE, DominatorTree *DT) {
  SCEVHandle TheAddRec = Start;   // Initialize to zero.

  // If the outer level is an AddExpr, the operands are all start values except
  // for a nested AddRecExpr.
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
    for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
      if (const SCEVAddRecExpr *AddRec =
             dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
        if (AddRec->getLoop() == L)
          TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
        else
          return false;  // Nested IV of some sort?
      } else {
        Start = SE->getAddExpr(Start, AE->getOperand(i));
      }
  } else if (isa<SCEVAddRecExpr>(SH)) {
    TheAddRec = SH;
  } else {
    return false;  // not analyzable.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
  if (!AddRec || AddRec->getLoop() != L) return false;

  // FIXME: Generalize to non-affine IV's.
  if (!AddRec->isAffine()) return false;

  // If Start contains an SCEVAddRecExpr from a different loop, other than an
  // outer loop of the current loop, reject it.  SCEV has no concept of
  // operating on more than one loop at a time so don't confuse it with such
  // expressions.
  if (containsAddRecFromDifferentLoop(AddRec->getOperand(0), L))
    return false;

  Start = SE->getAddExpr(Start, AddRec->getOperand(0));

  if (!isa<SCEVConstant>(AddRec->getOperand(1))) {
    // If stride is an instruction, make sure it dominates the loop preheader.
    // Otherwise we could end up with a use before def situation.
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!AddRec->getOperand(1)->dominates(Preheader, DT))
      return false;

    DOUT << "[" << L->getHeader()->getName()
         << "] Variable stride: " << *AddRec << "\n";
  }

  Stride = AddRec->getOperand(1);
  return true;
}
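// As a sketch of the decomposition this performs: for an access to A[i+3]
// inside loop L, where i is a canonical IV over 4-byte elements, the address
// typically has the SCEV {(A+12),+,4}<L>; this function would then produce
// Start = (A+12) and Stride = 4.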
/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
/// and now we need to decide whether the user should use the preinc or post-inc
/// value.  If this user should use the post-inc version of the IV, return true.
///
/// Choosing wrong here can break dominance properties (if we choose to use the
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
                                       Loop *L, DominatorTree *DT, Pass *P,
                                      SmallVectorImpl<Instruction*> &DeadInsts){
  // If the user is in the loop, use the preinc value.
  if (L->contains(User->getParent())) return false;

  BasicBlock *LatchBlock = L->getLoopLatch();

  // Ok, the user is outside of the loop.  If it is dominated by the latch
  // block, use the post-inc value.
  if (DT->dominates(LatchBlock, User->getParent()))
    return true;

  // There is one case we have to be careful of: PHI nodes.  These little guys
  // can live in blocks that do not dominate the latch block, but (since their
  // uses occur in the predecessor block, not the block the PHI lives in) should
  // still use the post-inc value.  Check for this case now.
  PHINode *PN = dyn_cast<PHINode>(User);
  if (!PN) return false;  // not a phi, not dominated by latch block.

  // Look at all of the uses of IV by the PHI node.  If any use corresponds to
  // a block that is not dominated by the latch block, give up and use the
  // preincremented value.
  unsigned NumUses = 0;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == IV) {
      ++NumUses;
      if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
        return false;
    }

  // Okay, all uses of IV by PN are in predecessor blocks that really are
  // dominated by the latch block.  Use the post-incremented value.
  return true;
}
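// For example (illustrative): a use of the IV in the sole exit block of a
// loop that exits from its latch is dominated by the latch, so it can use the
// post-incremented value; rewriting it in terms of the pre-inc value would
// force the pre-inc value to be live out of the loop, creating an extra
// overlapping live range.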
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *UseTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    UseTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      UseTy = II->getOperand(1)->getType();
      break;
    }
  }
  return UseTy;
}
/// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true.  Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
                                      SmallPtrSet<Instruction*,16> &Processed) {
  if (!SE->isSCEVable(I->getType()))
    return false;   // Void and FP expressions cannot be reduced.

  // LSR is not APInt clean, do not touch integers bigger than 64-bits.
  if (SE->getTypeSizeInBits(I->getType()) > 64)
    return false;

  if (!Processed.insert(I))
    return true;    // Instruction already handled.

  // Get the symbolic expression for this instruction.
  SCEVHandle ISE = SE->getSCEV(I);
  if (isa<SCEVCouldNotCompute>(ISE)) return false;

  // Get the start and stride for this expression.
  SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
  SCEVHandle Stride = Start;
  if (!getSCEVStartAndStride(ISE, L, Start, Stride, SE, DT))
    return false;  // Non-reducible symbolic expression, bail out.

  std::vector<Instruction *> IUsers;
  // Collect all I uses now because IVUseShouldUsePostIncValue may
  // invalidate use_iterator.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    IUsers.push_back(cast<Instruction>(*UI));

  for (unsigned iused_index = 0, iused_size = IUsers.size();
       iused_index != iused_size; ++iused_index) {

    Instruction *User = IUsers[iused_index];

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && Processed.count(User))
      continue;

    // Descend recursively, but not into PHI nodes outside the current loop.
    // It's important to see the entire expression outside the loop to get
    // choices that depend on addressing mode use right, although we won't
    // consider references outside the loop in all cases.
    // If User is already in Processed, we don't want to recurse into it again,
    // but do want to record a second reference in the same instruction.
    bool AddUserToIVUsers = false;
    if (LI->getLoopFor(User->getParent()) != L) {
      if (isa<PHINode>(User) || Processed.count(User) ||
          !AddUsersIfInteresting(User, L, Processed)) {
        DOUT << "FOUND USER in other loop: " << *User
             << " OF SCEV: " << *ISE << "\n";
        AddUserToIVUsers = true;
      }
    } else if (Processed.count(User) ||
               !AddUsersIfInteresting(User, L, Processed)) {
      DOUT << "FOUND USER: " << *User
           << " OF SCEV: " << *ISE << "\n";
      AddUserToIVUsers = true;
    }

    if (AddUserToIVUsers) {
      IVUsersOfOneStride &StrideUses = IVUsesByStride[Stride];
      if (StrideUses.Users.empty())     // First occurrence of this stride?
        StrideOrder.push_back(Stride);

      // Okay, we found a user that we cannot reduce.  Analyze the instruction
      // and decide what to do with it.  If we are a use inside of the loop, use
      // the value before incrementation, otherwise use it after incrementation.
      if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
        // The value used will be incremented by the stride more than we are
        // expecting, so subtract this off.
        SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
        StrideUses.addUser(NewStart, User, I);
        StrideUses.Users.back().isUseOfPostIncrementedValue = true;
        DOUT << "   USING POSTINC SCEV, START=" << *NewStart << "\n";
      } else {
        StrideUses.addUser(Start, User, I);
      }
    }
  }
  return true;
}
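// To make the bookkeeping concrete (an illustrative example, not code from
// this pass): for a store "A[i+1] = 0" in loop L with a 4-byte element type,
// the address operand has SCEV {(A+4),+,4}<L>, so a use with Offset (A+4) is
// recorded in IVUsesByStride under the stride 4.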
namespace {

/// BasedUser - For a particular base value, keep information about how we've
/// partitioned the expression so far.
struct VISIBILITY_HIDDEN BasedUser {
  /// SE - The current ScalarEvolution object.
  ScalarEvolution *SE;

  /// Base - The Base value for the PHI node that needs to be inserted for
  /// this use.  As the use is processed, information gets moved from this
  /// field to the Imm field (below).  BasedUser values are sorted by this
  /// field.
  SCEVHandle Base;

  /// Inst - The instruction using the induction variable.
  Instruction *Inst;

  /// OperandValToReplace - The operand value of Inst to replace with the
  /// new expression.
  Value *OperandValToReplace;

  /// Imm - The immediate value that should be added to the base immediately
  /// before Inst, because it will be folded into the imm field of the
  /// instruction.  This is also sometimes used for loop-variant values that
  /// must be added inside the loop.
  SCEVHandle Imm;

  /// Phi - The induction variable that performs the striding that
  /// should be used for this user.
  PHINode *Phi;

  // isUseOfPostIncrementedValue - True if this should use the
  // post-incremented version of this IV, not the preincremented version.
  // This can only be set in special cases, such as the terminating setcc
  // instruction for a loop and uses outside the loop that are dominated by
  // the loop.
  bool isUseOfPostIncrementedValue;

  BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
    : SE(se), Base(IVSU.Offset), Inst(IVSU.User),
      OperandValToReplace(IVSU.OperandValToReplace),
      Imm(SE->getIntegerSCEV(0, Base->getType())),
      isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue) {}

  // Once we rewrite the code to insert the new IVs we want, update the
  // operands of Inst to use the new expression 'NewBase', with 'Imm' added
  // to it.
  void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                      Instruction *InsertPt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<Instruction*> &DeadInsts);

  Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                     const Type *Ty,
                                     SCEVExpander &Rewriter,
                                     Instruction *IP, Loop *L);
  void dump() const;
};

} // end anonymous namespace
void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  cerr << "   Inst: " << *Inst;
}
Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L) {
  // Figure out where we *really* want to insert this code.  In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user, we'd rather pull it out as
  // many loops as possible.
  LoopInfo &LI = Rewriter.getLoopInfo();
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  Value *Base = Rewriter.expandCodeFor(NewBase, Ty, BaseInsertPt);

  // If there is no immediate value, skip the next part.
  if (Imm->isZero())
    return Base;

  // If we are inserting the base and imm values in the same block, make sure to
  // adjust the IP position if insertion reused a result.
  if (IP == BaseInsertPt)
    IP = Rewriter.getInsertionPoint();

  // Always emit the immediate (if non-zero) into the same block as the user.
  SCEVHandle NewValSCEV = SE->getAddExpr(SE->getUnknown(Base), Imm);
  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}
// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.  NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<Instruction*> &DeadInsts){
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users.  Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation.  This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block.  However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, L);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DOUT << " Replacing with ";
    DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
    DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
    return;
  }

  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it.  Note that PHI nodes can
  // have multiple entries for the same predecessor.  We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI.  This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI.  We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case.
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths.  We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        BasicBlock *PHIPred = PN->getIncomingBlock(i);
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

          // Next step: move the basic block.  In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
            BasicBlock *NewBB = PN->getIncomingBlock(i);
            NewBB->moveBefore(PN->getParent());
          }

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
        }
      }
      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PN->getIncomingBlock(i)->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, L);

        DOUT << " Changing PHI use to ";
        DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
        DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}
/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEVHandle &V, const Type *UseTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, UseTy);
    }
    // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
    return (VC > -(1 << 16) && VC < (1 << 16)-1);
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, UseTy);
      }
      // Default: assume global addresses are not legal.
    }

  return false;
}
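// For example, with a TargetLowering for a target like x86, a constant such
// as 42 can typically be folded into the address (as in [reg+42]), while the
// TLI-less fallback above conservatively models a PPC-style sign-extended
// 16-bit displacement field.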
/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}
/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *UseTy,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    std::vector<SCEVHandle> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      SCEVHandle NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, UseTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    SCEVHandle Start = SARE->getStart();
    MoveImmediateValues(TLI, UseTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress && fitsInAddressMode(SME->getOperand(0), UseTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType());
      SCEVHandle NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, UseTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this.
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8".  If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, UseTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, UseTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                SCEVHandle &Val, SCEVHandle &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *UseTy = getAccessType(User);
  MoveImmediateValues(TLI, UseTy, Val, Imm, isAddress, L, SE);
}
/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together.  This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
                             SCEVHandle Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}
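// An illustrative decomposition: for Expr = A + B + {0,+,4}<L>, SubExprs
// receives the three entries A, B, and {0,+,4}<L>; for {A,+,4}<L> it receives
// {0,+,4}<L> plus the subexpressions of the start value A.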
// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything.  This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression.  The common expression
/// is *removed* from the Bases and returned.
static SCEVHandle
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use?  This is a very common case, so we handle it specially and
  // cheaply.
  SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  SCEVHandle Result = Zero;
  SCEVHandle FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's.  If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<SCEVHandle, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  std::vector<SCEVHandle> UniqueSubExprs;

  std::vector<SCEVHandle> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV).  We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the UseTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *UseTy = 0;
    if (isAddrUse)
      UseTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree.  We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], UseTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result.  Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<SCEVHandle, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop.  However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers).  Check this.
    for (unsigned i=0; i<NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *UseTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, UseTy, TLI, Result!=Zero)) {
        // FIXME:  could split up FreeResult into pieces here, some hoisted
        // and some not.  There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData.  This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<SCEVHandle, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does.  Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}
/// ValidStride - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
                                     int64_t Scale,
                                     const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}
/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a legal conversion.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}
/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible.  Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within.  Well, usually.
SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEVHandle &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidStride(HasBaseReg, Scale, UsersToProcess)))
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // FIXME: Only handle base == 0 for now.
          // Only reuse previous IV if it would not require a type conversion.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is really really right to substitute
    // an existing IV if we can.
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case, old IV is -1*x and this one is x.  Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
         ++NewStride) {
      std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
                IVsByStride.find(StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }
  return SE->getIntegerSCEV(0, Stride->getType());
}
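// An illustrative reuse case: if an IV already exists for stride 4 and the
// current users have stride 8, this returns a multiple of 2, letting those
// users be rewritten in terms of the stride-4 IV with a scale of 2 (folded
// into the addressing mode when the target allows it) instead of
// materializing a second IV.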
/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}
/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEVHandle &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}
// CollectIVUsers - Transform our list of users and offsets to a bit more
// complex table. In this new vector, each 'BasedUser' contains 'Base', the base
// of the strided accesses, as well as the old information from Uses. We
// progressively move information from the Base field to the Imm field, until
// we eventually have the full access expression to rewrite the use.
SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride,
                                              IVUsersOfOneStride &Uses,
                                              Loop *L,
                                              bool &AllUsesAreAddresses,
                                              bool &AllUsesAreOutsideLoop,
                                       std::vector<BasedUser> &UsersToProcess) {
  UsersToProcess.reserve(Uses.Users.size());
  for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i) {
    UsersToProcess.push_back(BasedUser(Uses.Users[i], SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases.  We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible.  Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  SCEVHandle CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.  We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV.  Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores.  Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}
/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
///
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                   const std::vector<BasedUser> &UsersToProcess,
                                   const Loop *L,
                                   bool AllUsesAreAddresses,
                                   SCEVHandle Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *UseTy = getAccessType(Inst);
        SCEVHandle Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, UseTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}
/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the stride is negative, the increment is emitted as a subtract of the
/// negated stride instead of an add.
///
/// Return the created phi node.
///
static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  SCEVHandle IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     LatchBlock->getTerminator());
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     LatchBlock->getTerminator());
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}

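// For illustration, with Start = 0 and Step = 4 the code above produces IR
// shaped roughly like this (block names are hypothetical; "lsr.iv" and
// "lsr.iv.next" are the names used above):
//
//   header:
//     %lsr.iv = phi i32 [ 0, %preheader ], [ %lsr.iv.next, %latch ]
//     ...
//   latch:
//     %lsr.iv.next = add i32 %lsr.iv, 4
//
// For a non-constant negative Step, the add becomes a sub of the negated
// step, as handled by the isNegative path above.
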
static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first. To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together. By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't. The problem is that
  // SCEVHandles don't have a deterministic ordering w.r.t. each other, so
  // we don't have anything to do a '<' comparison on. Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // equality.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    SCEVHandle Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}

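// A sketch of the resulting order (hypothetical bases A and B, flag is
// isUseOfPostIncrementedValue): [(A,false), (B,false), (A,true)] first
// partitions to [(A,true), (A,false), (B,false)], and the compaction keeps
// equal bases adjacent, so the non-post-inc (in-loop) users sit at the end
// of the vector, where they are popped off and processed first.
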
/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                        std::vector<BasedUser> &UsersToProcess,
                                        SCEVHandle Stride,
                                        SCEVHandle CommonExprs,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DOUT << "  Fully reducing all users\n";

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    SCEVHandle Imm = UsersToProcess[i].Imm;
    SCEVHandle Base = UsersToProcess[i].Base;
    SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
    do {
      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
      UsersToProcess[i].Phi = Phi;
      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
             "ShouldUseFullStrengthReductionMode should reject this!");
    } while (++i != e && Base == UsersToProcess[i].Base);
  }
}

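// Hypothetical example: with Stride = 4 and users carrying (Base, Imm) of
// (X, 0), (X, 4), and (Y, 0), the loop above creates one PHI starting at
// CommonExprs+X+0 shared by both X users (the second keeps Imm = 4-0 = 4
// relative to the first) and a second PHI starting at CommonExprs+Y+0, with
// every Base zeroed because it is now folded into its PHI's start value.
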
/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
/// given users to share.
///
void
LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                        std::vector<BasedUser> &UsersToProcess,
                                        SCEVHandle Stride,
                                        SCEVHandle CommonExprs,
                                        Value *CommonBaseV,
                                        const Loop *L,
                                        SCEVExpander &PreheaderRewriter) {
  DOUT << "  Inserting new PHI:\n";

  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
                                 Stride, L,
                                 PreheaderRewriter);

  // Remember this in case a later stride is a multiple of this.
  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);

  // All the users will share this new IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = Phi;

  DOUT << "    IV=";
  DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
  DOUT << "\n";
}

/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
/// reuse an induction variable with a stride that is a factor of the current
/// induction variable.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
                                        std::vector<BasedUser> &UsersToProcess,
                                        Value *CommonBaseV,
                                        const IVExpr &ReuseIV,
                                        Instruction *PreInsertPt) {
  DOUT << "  Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
       << " and BASE " << *ReuseIV.Base << "\n";

  // All the users will share the reused IV.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    UsersToProcess[i].Phi = ReuseIV.PHI;

  Constant *C = dyn_cast<Constant>(CommonBaseV);
  if (!C ||
      (!C->isNullValue() &&
       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
                          TLI, false)))
    // We want the common base emitted into the preheader! This is just
    // using cast as a copy so BitCast (no-op cast) is appropriate.
    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
                                  "commonbase", PreInsertPt);
}

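// Illustrative case (strides and target hypothetical): if a stride-4 IV
// already exists and the current stride is 8, the caller computes
// RewriteFactor = 2, and an address use can then be folded into an
// addressing mode such as x86's [base + 2*iv] instead of maintaining a
// separate stride-8 induction variable.
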
static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
                                    const Type *AccessTy,
                                    std::vector<BasedUser> &UsersToProcess,
                                    const TargetLowering *TLI) {
  SmallVector<Instruction*, 16> AddrModeInsts;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    if (UsersToProcess[i].isUseOfPostIncrementedValue)
      continue;
    ExtAddrMode AddrMode =
      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
                                   AccessTy, UsersToProcess[i].Inst,
                                   AddrModeInsts, *TLI);
    if (GV && GV != AddrMode.BaseGV)
      return false;
    if (Offset && !AddrMode.BaseOffs)
      // FIXME: How to accurately check whether the immediate offset is folded.
      return false;
    AddrModeInsts.clear();
  }
  return true;
}

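// For example (target behavior is illustrative): on x86 a global plus a
// small constant can typically be folded into each use's address, as in
//
//   movl $0, gv+4(,%ecx,4)
//
// so for such users this predicate returns true and the caller may sink the
// "gv+4" part back down into the uses instead of keeping it in a register.
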
/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV. All of the users may have different starting values, and this
/// may not be the only stride.
void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
                                                      IVUsersOfOneStride &Uses,
                                                      Loop *L) {
  // If all the users are moved to another stride, then there is nothing to do.
  if (Uses.Users.empty())
    return;

  // Keep track if every use in UsersToProcess is an address. If they all are,
  // we may be able to rewrite the entire collection of them in terms of a
  // smaller-stride IV.
  bool AllUsesAreAddresses = true;

  // Keep track if every use of a single stride is outside the loop. If so,
  // we want to be more aggressive about reusing a smaller-stride IV; a
  // multiply outside the loop is better than another IV inside. Well, usually.
  bool AllUsesAreOutsideLoop = true;

  // Transform our list of users and offsets to a bit more complex table. In
  // this new vector, each 'BasedUser' contains 'Base', the base of the
  // strided access, as well as the old information from Uses. We progressively
  // move information from the Base field to the Imm field, until we eventually
  // have the full access expression to rewrite the use.
  std::vector<BasedUser> UsersToProcess;
  SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                          AllUsesAreOutsideLoop,
                                          UsersToProcess);

  // Sort the UsersToProcess array so that users with common bases are
  // next to each other.
  SortUsersToProcess(UsersToProcess);

  // If we managed to find some expressions in common, we'll need to carry
  // their value in a register and add it in for each use. This will take up
  // a register operand, which potentially restricts what stride values are
  // valid.
  bool HaveCommonExprs = !CommonExprs->isZero();
  const Type *ReplacedTy = CommonExprs->getType();

  // If all uses are addresses, consider sinking the immediate part of the
  // common expression back into uses if they can fit in the immediate fields.
  if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
    SCEVHandle NewCommon = CommonExprs;
    SCEVHandle Imm = SE->getIntegerSCEV(0, ReplacedTy);
    MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
    if (!Imm->isZero()) {
      bool DoSink = true;

      // If the immediate part of the common expression is a GV, check if it's
      // possible to fold it into the target addressing mode.
      GlobalValue *GV = 0;
      if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
        GV = dyn_cast<GlobalValue>(SU->getValue());
      int64_t Offset = 0;
      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
        Offset = SC->getValue()->getSExtValue();
      if (GV || Offset)
        // Pass VoidTy as the AccessTy to be conservative, because
        // there could be multiple access types among all the uses.
        DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy,
                                         UsersToProcess, TLI);

      if (DoSink) {
        DOUT << "  Sinking " << *Imm << " back down into uses\n";
        for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
          UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
        CommonExprs = NewCommon;
        HaveCommonExprs = !CommonExprs->isZero();
        ++NumImmSunk;
      }
    }
  }
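
  // Worked example of the sinking above (names hypothetical): if CommonExprs
  // is "%p + (gv + 4)" and every address use can fold "gv + 4" into its
  // addressing mode, the "gv + 4" part is added to each user's Imm and
  // CommonExprs shrinks to "%p", so the shared register no longer has to
  // hold the combined expression.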

  // Now that we know what we need to do, insert the PHI node itself.
  //
  DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
       << *Stride << ":\n"
       << "  Common base: " << *CommonExprs << "\n";

  SCEVExpander Rewriter(*SE, *LI);
  SCEVExpander PreheaderRewriter(*SE, *LI);

  BasicBlock  *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  BasicBlock *LatchBlock = L->getLoopLatch();

  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);

  SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
  IVExpr   ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
                   SE->getIntegerSCEV(0, Type::Int32Ty),
                   0);

  // Choose a strength-reduction strategy and prepare for it by creating
  // the necessary PHIs and adjusting the bookkeeping.
  if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
                                         AllUsesAreAddresses, Stride)) {
    PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
                                 PreheaderRewriter);
  } else {
    // Emit the initial base value into the loop preheader.
    CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
                                                  PreInsertPt);

    // If all uses are addresses, check if it is possible to reuse an IV with a
    // stride that is a factor of this stride. And that the multiple is a number
    // that can be encoded in the scale field of the target addressing mode. And
    // that we will have a valid instruction after this substitution, including
    // the immediate field, if any.
    RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
                                    AllUsesAreOutsideLoop,
                                    Stride, ReuseIV, ReplacedTy,
                                    UsersToProcess);
    if (isa<SCEVConstant>(RewriteFactor) &&
        cast<SCEVConstant>(RewriteFactor)->isZero())
      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
                                        CommonBaseV, L, PreheaderRewriter);
    else
      PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
                                               ReuseIV, PreInsertPt);
  }

  // Process all the users now, replacing their strided uses with
  // strength-reduced forms. This outer loop handles all bases, the inner
  // loop handles all users of a particular base.
  while (!UsersToProcess.empty()) {
    SCEVHandle Base = UsersToProcess.back().Base;
    Instruction *Inst = UsersToProcess.back().Inst;

    // Emit the code for Base into the preheader.
    Value *BaseV = 0;
    if (!Base->isZero()) {
      BaseV = PreheaderRewriter.expandCodeFor(Base, Base->getType(),
                                              PreInsertPt);

      DOUT << "  INSERTING code for BASE = " << *Base << ":";
      if (BaseV->hasName())
        DOUT << " Result value name = %" << BaseV->getNameStr();
      DOUT << "\n";

      // If BaseV is a non-zero constant, make sure that it gets inserted into
      // the preheader, instead of being forward substituted into the uses. We
      // do this by forcing a BitCast (noop cast) to be inserted into the
      // preheader in this case.
      if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false)) {
        // We want this constant emitted into the preheader! This is just
        // using cast as a copy so BitCast (no-op cast) is appropriate.
        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
                                PreInsertPt);
      }
    }

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instructions that we identified as using this stride and base.
    do {
      // FIXME: Use emitted users to emit other users.
      BasedUser &User = UsersToProcess.back();

      DOUT << "    Examining use ";
      DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
                           /*PrintType=*/false));
      DOUT << " in Inst: " << *(User.Inst);

      // If this instruction wants to use the post-incremented value, move it
      // after the post-inc and use its value instead of the PHI.
      Value *RewriteOp = User.Phi;
      if (User.isUseOfPostIncrementedValue) {
        RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);

        // If this user is in the loop, make sure it is the last thing in the
        // loop to ensure it is dominated by the increment.
        if (L->contains(User.Inst->getParent()))
          User.Inst->moveBefore(LatchBlock->getTerminator());
      }

      SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);

      if (SE->getTypeSizeInBits(RewriteOp->getType()) !=
          SE->getTypeSizeInBits(ReplacedTy)) {
        assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
               SE->getTypeSizeInBits(ReplacedTy) &&
               "Unexpected widening cast!");
        RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
      }

      // If we had to insert new instructions for RewriteOp, we have to
      // consider that they may not have been able to end up immediately
      // next to RewriteOp, because non-PHI instructions may never precede
      // PHI instructions in a block. In this case, remember where the last
      // instruction was inserted so that if we're replacing a different
      // PHI node, we can use the later point to expand the final
      // RewriteExpr.
      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
      if (RewriteOp == User.Phi) NewBasePt = 0;

      // Clear the SCEVExpander's expression map so that we are guaranteed
      // to have the code emitted where we expect it.
      Rewriter.clear();

      // If we are reusing the iv, then it must be multiplied by a constant
      // factor to take advantage of the addressing mode scale component.
      if (!RewriteFactor->isZero()) {
        // If we're reusing an IV with a nonzero base (currently this happens
        // only when all reuses are outside the loop) subtract that base here.
        // The base has been used to initialize the PHI node but we don't want
        // it here.
        if (!ReuseIV.Base->isZero()) {
          SCEVHandle typedBase = ReuseIV.Base;
          if (SE->getTypeSizeInBits(RewriteExpr->getType()) !=
              SE->getTypeSizeInBits(ReuseIV.Base->getType())) {
            // It's possible the original IV is a larger type than the new IV,
            // in which case we have to truncate the Base. We checked in
            // RequiresTypeConversion that this is valid.
            assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
                   SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
                   "Unexpected lengthening conversion!");
            typedBase = SE->getTruncateExpr(ReuseIV.Base,
                                            RewriteExpr->getType());
          }
          RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
        }

        // Multiply old variable, with base removed, by new scale factor.
        RewriteExpr = SE->getMulExpr(RewriteFactor,
                                     RewriteExpr);

        // The common base is emitted in the loop preheader. But since we
        // are reusing an IV, it has not been used to initialize the PHI node.
        // Add it to the expression used to rewrite the uses.
        // When this use is outside the loop, we earlier subtracted the
        // common base, and are adding it back here. Use the same expression
        // as before, rather than CommonBaseV, so DAGCombiner will zap it.
        if (!CommonExprs->isZero()) {
          if (L->contains(User.Inst->getParent()))
            RewriteExpr = SE->getAddExpr(RewriteExpr,
                                         SE->getUnknown(CommonBaseV));
          else
            RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
        }
      }

      // Now that we know what we need to do, insert code before User for the
      // immediate and any loop-variant expressions.
      if (BaseV)
        // Add BaseV to the PHI value if needed.
        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));

      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
                                          Rewriter, L, this,
                                          DeadInsts);

      // Mark old value we replaced as possibly dead, so that it is eliminated
      // if we just replaced the last use of that value.
      DeadInsts.push_back(cast<Instruction>(User.OperandValToReplace));

      UsersToProcess.pop_back();
      ++NumReduced;

      // If there are any more users to process with the same base, process them
      // now. We sorted by base above, so we just have to check the last elt.
    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.
}

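// Net effect on a single use (hypothetical, IR abridged): a computation like
//
//   %a = mul i32 %i, 4
//   ; ... %a used as part of an address ...
//
// is rewritten in terms of an IV created above:
//
//   %lsr.iv = phi i32 [ %start, %preheader ], [ %lsr.iv.next, %latch ]
//   ; ... %lsr.iv used directly ...
//   %lsr.iv.next = add i32 %lsr.iv, 4
//
// eliminating the per-iteration multiply.
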
/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                                           const SCEVHandle *&CondStride) {
  for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e && !CondUse;
       ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
      IVUsesByStride.find(StrideOrder[Stride]);
    assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");

    for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
           E = SI->second.Users.end(); UI != E; ++UI)
      if (UI->User == Cond) {
        // NOTE: we could handle setcc instructions with multiple uses here, but
        // InstCombine does it as well for simple uses, and it's not clear that
        // it occurs enough in real life to handle.
        CondUse = &*UI;
        CondStride = &SI->first;
        break;
      }
  }
  return CondUse;
}

// Constant strides come first, which in turn are sorted by their absolute
// values. If absolute values are the same, then positive strides come first.
// e.g.
// 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
struct StrideCompare {
  const ScalarEvolution *SE;
  explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}

  bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
    const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (LHSC && RHSC) {
      int64_t  LV = LHSC->getValue()->getSExtValue();
      int64_t  RV = RHSC->getValue()->getSExtValue();
      uint64_t ALV = (LV < 0) ? -LV : LV;
      uint64_t ARV = (RV < 0) ? -RV : RV;
      if (ALV == ARV) {
        if (LV != RV)
          return LV > RV;
      } else {
        return ALV < ARV;
      }

      // If it's the same value but different type, sort by bit width so
      // that we emit larger induction variables before smaller
      // ones, letting the smaller be re-written in terms of larger ones.
      return SE->getTypeSizeInBits(RHS->getType()) <
             SE->getTypeSizeInBits(LHS->getType());
    }
    return LHSC && !RHSC;
  }
};

/// ChangeCompareStride - If a loop termination compare instruction is the
/// only use of its stride, and the comparison is against a constant value,
/// try to eliminate the stride by moving the compare instruction to another
/// stride and change its constant operand accordingly. e.g.
///
/// loop:
/// ...
/// v1 = v1 + 3
/// v2 = v2 + 1
/// if (v2 < 10) goto loop
/// =>
/// loop:
/// ...
/// v1 = v1 + 3
/// if (v1 < 30) goto loop
ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                                  IVStrideUse* &CondUse,
                                                  const SCEVHandle* &CondStride) {
  if (StrideOrder.size() < 2 ||
      IVUsesByStride[*CondStride].Users.size() != 1)
    return Cond;
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
  if (!SC) return Cond;

  ICmpInst::Predicate Predicate = Cond->getPredicate();
  int64_t CmpSSInt = SC->getValue()->getSExtValue();
  unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
  uint64_t SignBit = 1ULL << (BitWidth-1);
  const Type *CmpTy = Cond->getOperand(0)->getType();
  const Type *NewCmpTy = NULL;
  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
  unsigned NewTyBits = 0;
  SCEVHandle *NewStride = NULL;
  Value *NewCmpLHS = NULL;
  Value *NewCmpRHS = NULL;
  int64_t Scale = 1;
  SCEVHandle NewOffset = SE->getIntegerSCEV(0, CmpTy);

  if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
    int64_t CmpVal = C->getValue().getSExtValue();

    // Check stride constant and the comparison constant signs to detect
    // impossible cases.
    if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
      return Cond;

    // Look for a suitable stride / iv as replacement.
    for (unsigned i = 0, e = StrideOrder.size(); i != e; ++i) {
      std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
        IVUsesByStride.find(StrideOrder[i]);
      if (!isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SSInt == CmpSSInt ||
          abs(SSInt) < abs(CmpSSInt) ||
          (SSInt % CmpSSInt) != 0)
        continue;

      Scale = SSInt / CmpSSInt;
      int64_t NewCmpVal = CmpVal * Scale;
      APInt Mul = APInt(BitWidth, NewCmpVal);
      // Check for overflow.
      if (Mul.getSExtValue() != NewCmpVal)
        continue;

      // Watch out for overflow.
      if (ICmpInst::isSignedPredicate(Predicate) &&
          (CmpVal & SignBit) != (NewCmpVal & SignBit))
        continue;

      if (NewCmpVal == CmpVal)
        continue;
      // Pick the best iv to use trying to avoid a cast.
      NewCmpLHS = NULL;
      for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
             E = SI->second.Users.end(); UI != E; ++UI) {
        NewCmpLHS = UI->OperandValToReplace;
        if (NewCmpLHS->getType() == CmpTy)
          break;
      }
      if (!NewCmpLHS)
        continue;

      NewCmpTy = NewCmpLHS->getType();
      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
      const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
        // Check if it is possible to rewrite it using
        // an iv / stride of a smaller integer type.
        unsigned Bits = NewTyBits;
        if (ICmpInst::isSignedPredicate(Predicate))
          --Bits;
        uint64_t Mask = (1ULL << Bits) - 1;
        if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
          continue;
      }

      // Don't rewrite if the use offset is non-constant and the new type is
      // a different type.
      // FIXME: too conservative?
      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset))
        continue;

      bool AllUsesAreAddresses = true;
      bool AllUsesAreOutsideLoop = true;
      std::vector<BasedUser> UsersToProcess;
      SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
                                              AllUsesAreAddresses,
                                              AllUsesAreOutsideLoop,
                                              UsersToProcess);
      // Avoid rewriting the compare instruction with an iv of new stride
      // if it's likely the new stride uses will be rewritten using the
      // stride of the compare instruction.
      if (AllUsesAreAddresses &&
          ValidStride(!CommonExprs->isZero(), Scale, UsersToProcess))
        continue;

      // If scale is negative, use swapped predicate unless it's testing
      // for equality.
      if (Scale < 0 && !Cond->isEquality())
        Predicate = ICmpInst::getSwappedPredicate(Predicate);

      NewStride = &StrideOrder[i];
      if (!isa<PointerType>(NewCmpTy))
        NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
      else {
        ConstantInt *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
        NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
      }
      NewOffset = TyBits == NewTyBits
        ? SE->getMulExpr(CondUse->Offset,
                         SE->getConstant(ConstantInt::get(CmpTy, Scale)))
        : SE->getConstant(ConstantInt::get(NewCmpIntTy,
          cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
      break;
    }
  }

  // Forgo this transformation if the increment happens to be
  // unfortunately positioned after the condition, and the condition
  // has multiple uses which prevent it from being moved immediately
  // before the branch. See
  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
  // for an example of this situation.
  if (!Cond->hasOneUse()) {
    for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
         I != E; ++I)
      if (I == NewCmpLHS)
        return Cond;
  }

  if (NewCmpRHS) {
    // Create a new compare instruction using new stride / iv.
    ICmpInst *OldCond = Cond;
    // Insert new compare instruction.
    Cond = new ICmpInst(Predicate, NewCmpLHS, NewCmpRHS,
                        L->getHeader()->getName() + ".termcond",
                        OldCond);

    // Remove the old compare instruction. The old indvar is probably dead too.
    DeadInsts.push_back(cast<Instruction>(CondUse->OperandValToReplace));
    OldCond->replaceAllUsesWith(Cond);
    OldCond->eraseFromParent();

    IVUsesByStride[*CondStride].Users.pop_back();
    IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewCmpLHS);
    CondUse = &IVUsesByStride[*NewStride].Users.back();
    CondStride = NewStride;
    ++NumEliminated;
  }

  return Cond;
}

/// OptimizeSMax - Rewrite the loop's terminating condition if it uses
/// an smax computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// where the comparison is signed, the trip count isn't just 'n', because
/// 'n' could be negative. And unfortunately this can come up even for loops
/// where the user didn't use a C do-while loop. For example, seemingly
/// well-behaved top-test loops will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// signed-max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   smax = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != smax);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
                                           IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  SCEVHandle IterationCount = SE->getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
  const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
  if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;

  SCEVHandle SMaxLHS = SMax->getOperand(0);
  SCEVHandle SMaxRHS = SMax->getOperand(1);
  if (!SMaxLHS || SMaxLHS != One) return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(*SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
                   CmpInst::ICMP_SLT :
                   CmpInst::ICMP_SGE,
                 Cond->getOperand(0), NewRHS, "scmp", Cond);

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  Cond->eraseFromParent();
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  CondUse->User = NewCond;
  return NewCond;
}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {

  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e;
       ++Stride) {
    std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
      IVUsesByStride.find(StrideOrder[Stride]);
    assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
    if (!isa<SCEVConstant>(SI->first))
      continue;

    for (std::vector<IVStrideUse>::iterator UI = SI->second.Users.begin(),
           E = SI->second.Users.end(); UI != E; /* empty */) {
      std::vector<IVStrideUse>::iterator CandidateUI = UI;
      ++UI;
      Instruction *ShadowUse = CandidateUI->User;
      const Type *DestTy = NULL;

      /* If shadow use is an int->float cast then insert a second IV
         to eliminate this cast.

           for (unsigned i = 0; i < n; ++i)
             foo((double)i);

         is transformed into

           double d = 0.0;
           for (unsigned i = 0; i < n; ++i, ++d)
             foo(d);
      */
      if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->User))
        DestTy = UCast->getDestTy();
      else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->User))
        DestTy = SCast->getDestTy();
      if (!DestTy) continue;

      if (TLI) {
        /* If target does not support DestTy natively then do not apply
           this transformation. */
        MVT DVT = TLI->getValueType(DestTy);
        if (!TLI->isTypeLegal(DVT)) continue;
      }

      PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
      if (!PH) continue;
      if (PH->getNumIncomingValues() != 2) continue;

      const Type *SrcTy = PH->getType();
      int Mantissa = DestTy->getFPMantissaWidth();
      if (Mantissa == -1) continue;
      if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
        continue;

      unsigned Entry, Latch;
      if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
        Entry = 0;
        Latch = 1;
      } else {
        Entry = 1;
        Latch = 0;
      }

      ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
      if (!Init) continue;
      ConstantFP *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

      BinaryOperator *Incr =
        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
      if (!Incr) continue;
      if (Incr->getOpcode() != Instruction::Add
          && Incr->getOpcode() != Instruction::Sub)
        continue;

      /* Initialize new IV, double d = 0.0 in above example. */
      ConstantInt *C = NULL;
      if (Incr->getOperand(0) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(1));
      else if (Incr->getOperand(1) == PH)
        C = dyn_cast<ConstantInt>(Incr->getOperand(0));
      else
        continue;

      if (!C) continue;

      /* Add new PHINode. */
      PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

      /* Create new increment. '++d' in above example. */
      ConstantFP *CFP = ConstantFP::get(DestTy, C->getZExtValue());
      BinaryOperator *NewIncr =
        BinaryOperator::Create(Incr->getOpcode(),
                               NewPH, CFP, "IV.S.next.", Incr);

      NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
      NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

      /* Remove cast operation */
      ShadowUse->replaceAllUsesWith(NewPH);
      ShadowUse->eraseFromParent();
      SI->second.Users.erase(CandidateUI);
      NumShadow++;
      break;
    }
  }
}

// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
// uses in the loop, look to see if we can eliminate some, in favor of using
// common indvars for the different uses.
void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
  // TODO: implement optzns here.

  OptimizeShadowIV(L);

  // Finally, get the terminating condition for the loop if possible. If we
  // can, we want to change it to use a post-incremented version of its
  // induction variable, to allow coalescing the live ranges for the IV into
  // one register value.
  PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock =
    SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
  BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
  if (!TermBr || TermBr->isUnconditional() ||
      !isa<ICmpInst>(TermBr->getCondition()))
    return;
  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());

  // Search IVUsesByStride to find Cond's IVUse if there is one.
  IVStrideUse *CondUse = 0;
  const SCEVHandle *CondStride = 0;

  if (!FindIVUserForCond(Cond, CondUse, CondStride))
    return; // setcc doesn't use the IV.

  // If the trip count is computed in terms of an smax (due to ScalarEvolution
  // being unable to find a sufficient guard, for example), change the loop
  // comparison to use SLT instead of NE.
  Cond = OptimizeSMax(L, Cond, CondUse);

  // If possible, change stride and operands of the compare instruction to
  // eliminate one stride.
  Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);

  // It's possible for the setcc instruction to be anywhere in the loop, and
  // possible for it to have multiple users. If it is not immediately before
  // the latch block branch, move it.
  if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
    if (Cond->hasOneUse()) {   // Condition has a single use, just move it.
      Cond->moveBefore(TermBr);
    } else {
      // Otherwise, clone the terminating condition and insert into the loopend.
      Cond = cast<ICmpInst>(Cond->clone());
      Cond->setName(L->getHeader()->getName() + ".termcond");
      LatchBlock->getInstList().insert(TermBr, Cond);

      // Clone the IVUse, as the old use still exists!
      IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
                                          CondUse->OperandValToReplace);
      CondUse = &IVUsesByStride[*CondStride].Users.back();
    }
  }

  // If we get to here, we know that we can transform the setcc instruction to
  // use the post-incremented version of the IV, allowing us to coalesce the
  // live ranges for the IV correctly.
  CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
  CondUse->isUseOfPostIncrementedValue = true;
  Changed = true;
}

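// For example (hypothetical loop): an exit test "icmp slt %iv, %n" on the
// pre-incremented value becomes a test of the post-incremented value,
// "icmp slt %iv.next, %n", with the use's Offset reduced by one stride to
// compensate, so the pre-inc value's live range can end at the increment.
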
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {

  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  SE = &getAnalysis<ScalarEvolution>();
  Changed = false;

  // Find all uses of induction variables in this loop, and categorize
  // them by stride. Start by finding all of the PHI nodes in the header for
  // this loop. If they are induction variables, inspect their uses.
  SmallPtrSet<Instruction*,16> Processed;   // Don't reprocess instructions.
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
    AddUsersIfInteresting(I, L, Processed);

  if (!IVUsesByStride.empty()) {
    DOUT << "\nLSR on \"" << L->getHeader()->getParent()->getNameStart()
         << "\" ";
    DEBUG(L->dump());

    // Sort the StrideOrder so we process larger strides first.
    std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare(SE));

    // Optimize induction variables. Some indvar uses can be transformed to use
    // strides that will be needed for other purposes. A common example of this
    // is the exit test for the loop, which can often be rewritten to use the
    // computation of some other indvar to decide when to terminate the loop.
    OptimizeIndvars(L);

    // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
    // doing computation in byte values, promote to 32-bit values if safe.

    // FIXME: Attempt to reuse values across multiple IV's. In particular, we
    // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
    // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
    // Need to be careful that IV's are all the same type. Only works for
    // intptr_t indvars.

    // IVsByStride keeps IVs for one particular loop.
    assert(IVsByStride.empty() && "Stale entries in IVsByStride?");

    // Note: this processes each stride/type pair individually. All users
    // passed into StrengthReduceStridedIVUsers have the same type AND stride.
    // Also, note that we iterate over IVUsesByStride indirectly by using
    // StrideOrder. This extra layer of indirection makes the ordering of
    // strides deterministic - not dependent on map order.
    for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
      std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI =
        IVUsesByStride.find(StrideOrder[Stride]);
      assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
      StrengthReduceStridedIVUsers(SI->first, SI->second, L);
    }
  }

  // We're done analyzing this loop; release all the state we built up for it.
  IVUsesByStride.clear();
  IVsByStride.clear();
  StrideOrder.clear();

  // Clean up after ourselves.
  if (!DeadInsts.empty())
    DeleteTriviallyDeadInstructions();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  DeleteDeadPHIs(L->getHeader());

  return Changed;
}
->getHeader());