//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable. It
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>. However,
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
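//
// For example, if %i.next flows out of the loop:
//
//   exit:
//     ret i32 %i.next
//
// the return is also a post-increment user, even though it is not an icmp:
// the value it sees is the value of %i after the final increment.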
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>

using namespace llvm;
/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}
void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}
void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}
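
// For example, dropping use 1 by swapping in the last use (index 3): each
// register's bit 1 takes the old value of bit 3, and the bit vectors are then
// truncated to 3 bits, so no stale use index survives the compaction.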
bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}
const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}
void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}
/// Formula - This class holds information that describes a formula for
/// computing a value that satisfies a use. It may include broken-out
/// immediates and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  unsigned getNumRegs() const;
  const Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}
/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}
/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}
/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}
/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}
/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}
/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}
void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}
/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}
/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to Y, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}
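
// A few illustrative examples of what getExactSDiv computes (SCEV notation;
// the non-constant cases assume the expressions are known not to overflow,
// or that IgnoreSignificantBits is set):
//
//   getExactSDiv({0,+,4}, 4, SE)        == {0,+,1}   (distributed over the addrec)
//   getExactSDiv((8 + (4 * %x)), 4, SE) == (2 + %x)  (distributed over the add)
//   getExactSDiv(7, 2, SE)              == null      (the remainder is nonzero)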
/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
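
// For example, given S = (5 + %x), ExtractImmediate returns 5 and leaves S
// pointing at %x; given S = {7,+,1}, it returns 7 and leaves S as {0,+,1}.
// ScalarEvolution canonicalizes constant operands to the front of adds and
// addrecs, which is why only NewOps.front() needs to be examined.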
/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getArgOperand(0) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}
/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};
/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getOperand(1)))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}
void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}
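
// For example, a formula reg(%p) + reg({0,+,8}) + 12 used by fixups with
// offsets {0, 4} is charged one base add (two base registers) and an ImmCost
// derived from the effective immediates 12 and 16 (each fixup offset plus the
// formula's AM.BaseOffs).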
/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}
/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}
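
// Because this comparison is lexicographic, a solution using fewer registers
// always wins regardless of the other fields; e.g. {NumRegs=2, ImmCost=12}
// is preferred over {NumRegs=3, ImmCost=0}. This is the "lexical ordering
// isn't always optimal" caveat noted in the Cost class above.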
void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}
/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this set is non-empty and holds the loops
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}
/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}
void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}
/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};
/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be relying
  /// on the implicit truncation to truncate away bogus bits.
  const Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true),
                                      WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses);

  void print(raw_ostream &OS) const;
  void dump() const;
};
/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}
/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
  assert(!Formulae.empty() && "LSRUse has no formulae left!");
}
/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}
static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}
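
// For example, with AM.BaseOffs == 4, MinOffset == -8, and MaxOffset == 16,
// this checks the two offset extremes, -4 and 20; the use is considered legal
// only if the target can fold both, since every fixup offset in the range
// must remain foldable.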
static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}
static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}
/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};
/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position that the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
    AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                  const LSRFixup &LF,
                                  const LSRUse &LU) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

public:
  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};
/// OptimizeShadowIV - If IV is used in a int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is a int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}
/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}
/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = 0;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}
/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible.  If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc..
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getMinSignedBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            const Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = C->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }
}
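// Illustrative sketch of the transformation above: moving the exit test from
// the pre-inc value
//   %c = icmp ne i64 %i, %nm1        ; %i is the IV value before the add
// to the post-inc value
//   %c = icmp ne i64 %i.next, %n     ; %i.next is the value after the add
// lets the IV's live range end at the increment, so the pre-inc and post-inc
// values can be coalesced into one register. The dominance and stride-reuse
// checks above are what keep this from pessimizing other in-loop users.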
/// reconcileNewOffset - Determine if the given use can accommodate a fixup
/// at the given offset and other details. If so, update the use and
/// return true.
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                                LSRUse::KindType Kind, const Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  const Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}
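// For example (illustrative): if LU currently covers offsets [0, 8] and a new
// fixup at offset 24 arrives, the use is only widened to [0, 24] when a
// displacement of 24 - 0 = 24 would still fold into the target's addressing
// modes; otherwise the caller falls back to creating a separate use.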
/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, const Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}
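// For example (illustrative): a use of {(%base + 16),+,4} is looked up as the
// base expression {%base,+,4} with Offset = 16, so fixups which differ only
// by a constant can share a single LSRUse (and thus a register).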
/// DeleteUse - Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.SwapAndDropUse(LUIdx, Uses.size());
}
/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
/// a formula that has the same registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.AM.BaseGV == OrigF.AM.BaseGV &&
            F.AM.Scale == OrigF.AM.Scale) {
          if (F.AM.BaseOffs == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return 0;
}
void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Expr = IU.getExpr(*UI);

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        Worklist.append(Add->op_begin(), Add->op_end());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}
void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    LF.PostIncLoops = UI->getPostIncLoops();

    LSRUse::KindType Kind = LSRUse::Basic;
    const Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
          Changed = true;
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (SE.isLoopInvariant(N, L)) {
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
    if (!LU.WidestFixupType ||
        SE.getTypeSizeInBits(LU.WidestFixupType) <
        SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}
/// InsertInitialFormula - Insert a formula for the given expression into
/// the given use, separating out loop-variant portions from loop-invariant
/// and loop-computable portions.
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}
/// InsertSupplementalFormula - Insert a simple single-register formula for
/// the given expression into the given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}
/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}
/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.append(N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
        // Look for instructions defined outside the loop.
        if (L->contains(Inst)) continue;
      } else if (isa<UndefValue>(V))
        // Undef doesn't have a live range, so it doesn't matter.
        continue;
      for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType())) {
          const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
          // If the user is a no-op, look through to its uses.
          if (!isa<SCEVUnknown>(UserS))
            continue;
          if (UserS == U) {
            Worklist.push_back(
              SE.getUnknown(const_cast<Instruction *>(UserInst)));
            continue;
          }
        }
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        if (!LU.WidestFixupType ||
            SE.getTypeSizeInBits(LU.WidestFixupType) <
            SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}
/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            const Loop *L,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, L, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop(),
                                       //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                                       SCEV::FlagAnyWrap),
                      C, Ops, L, SE);
      CollectSubexprs(AR->getStart(), C, Ops, L, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, L, SE);
        return;
      }
  }

  // Otherwise use the value itself, optionally with a scale applied.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}
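// For example (illustrative): {(%a + %b),+,1} is split into the pieces
// %a, %b, and {0,+,1}, each of which may then live in its own register and
// be reassociated independently by GenerateReassociations below.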
/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, L, SE);

    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {

      // Loop-variant "unknown" values are uninteresting; we won't be able to
      // do anything meaningful with them.
      if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
        continue;

      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps
        (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
      InnerAddOps.append
        (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
      if (InnerSum->isZero())
        continue;
      Formula F = Base;
      F.BaseRegs[i] = InnerSum;
      F.BaseRegs.push_back(*J);
      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}
/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (SE.properlyDominates(BaseReg, L->getHeader()) &&
        !SE.hasComputableLoopEvolution(BaseReg, L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}
/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}
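// For example (illustrative): a base register holding (@g + %i) can become a
// base register %i with AM.BaseGV = @g, letting targets that support
// global+register addressing fold the symbol into the address.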
/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        // Add the offset to the base register.
        const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
        // If it cancelled out, drop the base register, otherwise update it.
        if (NewG->isZero()) {
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
        } else
          F.BaseRegs[i] = NewG;

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}
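// For example (illustrative): given a base register {(%p + 4),+,8}, the
// immediate 4 can migrate into AM.BaseOffs, leaving {%p,+,8} as the base
// register; the legality of each placement is checked with isLegalUse.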
/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    // Check that the multiplication doesn't overflow.
    if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (NewBaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    Formula F = Base;
    F.AM.BaseOffs = NewBaseOffs;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}
/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special-case, handle special out-of-loop Basic users specially.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new formulae.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          F.DeleteBaseReg(F.BaseRegs[i]);
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}
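// For example (illustrative): with Factor = 4, a base register {0,+,4} can be
// rewritten as ScaledReg = {0,+,1} with AM.Scale = 4, which targets with
// scaled-index addressing (e.g. [base + 4*index] on x86) can match for free.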
/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  const Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    const Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}
/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << " , add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}
/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      const Formula &F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->getValue().isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(abs64(NewF.AM.BaseOffs)))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI))
            continue;
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
                   abs64(NewF.AM.BaseOffs)) &&
                  (C->getValue()->getValue() +
                   NewF.AM.BaseOffs).countTrailingZeros() >=
                  CountTrailingZeros_64(NewF.AM.BaseOffs))
                goto skip_formula;

          // Ok, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}
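// For example (illustrative): if one use needs {%p,+,4} and another needs
// {(%p + 8),+,4}, the two registers differ by the constant 8, so the second
// use can be given a formula based on the first register plus an immediate
// of 8, saving a register across the uses.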
/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));
}
/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  bool ChangedFormulae = false;

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // Unstable sort by host order ok, because this is only used for
      // uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];

        Cost CostF;
        CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        Regs.clear();
        Cost CostBest;
        CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        Regs.clear();
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of formula "; Best.print(dbgs());
              dbgs() << '\n');
        ChangedFormulae = true;
        LU.DeleteFormula(F);
        --FIdx;
        --NumForms;
        Any = true;
        continue;
      }
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  DEBUG(if (ChangedFormulae) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}
// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = UINT16_MAX;

/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
/// solutions the solver might have to consider. It almost never considers
/// this many solutions because it prunes the search space, but the pruning
/// isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    size_t FSize = I->Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}
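// The estimate is simply the product of the formula counts, saturated at
// ComplexityLimit. For example (illustrative), 5 uses with 10 formulae each
// yield 10^5 candidate solutions, which already exceeds UINT16_MAX and so
// triggers the search-space narrowing below.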
/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
/// of the registers of another formula, it won't help reduce register
/// pressure (though it may not necessarily hurt register pressure); remove
/// it to simplify the system.
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                    "which use a superset of registers used by other "
                    "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.AM.BaseOffs += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.AM.BaseGV) {
                Formula NewF = F;
                NewF.AM.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
/// for expressions like A, A+1, A+2, etc., allocate a single register for
/// them.
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
                    "separated by a constant offset will use the same "
                    "registers.\n");

    // This is especially useful for unrolled loops.

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
          if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
            if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
                                   /*HasBaseReg=*/false,
                                   LU.Kind, LU.AccessTy)) {
              DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs());
                    dbgs() << '\n');

              LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

              // Update the relocs to reference the new use.
              for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
                   E = Fixups.end(); I != E; ++I) {
                LSRFixup &Fixup = *I;
                if (Fixup.LUIdx == LUIdx) {
                  Fixup.LUIdx = LUThatHas - &Uses.front();
                  Fixup.Offset += F.AM.BaseOffs;
                  // Add the new offset to LUThatHas' offset list.
                  if (LUThatHas->Offsets.back() != Fixup.Offset) {
                    LUThatHas->Offsets.push_back(Fixup.Offset);
                    if (Fixup.Offset > LUThatHas->MaxOffset)
                      LUThatHas->MaxOffset = Fixup.Offset;
                    if (Fixup.Offset < LUThatHas->MinOffset)
                      LUThatHas->MinOffset = Fixup.Offset;
                  }
                  DEBUG(dbgs() << "New fixup has offset "
                               << Fixup.Offset << '\n');
                }
                if (Fixup.LUIdx == NumUses-1)
                  Fixup.LUIdx = LUIdx;
              }

              // Delete formulae from the new use which are no longer legal.
              bool Any = false;
              for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
                Formula &F = LUThatHas->Formulae[i];
                if (!isLegalUse(F.AM,
                                LUThatHas->MinOffset, LUThatHas->MaxOffset,
                                LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LUThatHas->DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                }
              }
              if (Any)
                LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

              // Delete the old use.
              DeleteUse(LU, LUIdx);
              --LUIdx;
              --NumUses;
              break;
            }
          }
        }
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                    "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
/// to be profitable, and then in any use which has any reference to that
/// register, delete all formulae which do not reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which references this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  NarrowSearchSpaceByPickingWinnerRegs();
}
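// Note that the ordering here appears deliberate: the cheaper and less
// destructive prunings run first, and the winner-register heuristic, which
// discards the most formulae, runs only as a last resort.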
/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formula so that the most profitable solutions are found first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a cost
  //      and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    assert(!ReqRegs.empty() && "Solver failed even without required registers");
    ReqRegs.clear();
    goto retry;
  }
}
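// This is effectively a branch-and-bound search: as RateFormula only
// accumulates cost while formulae are added, once a partial solution's cost
// reaches SolutionCost the whole subtree can be pruned rather than recursed
// into, which is what the NewCost < SolutionCost check above implements.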
/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}
/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by the
/// input positions. This helps canonicalize the insert position, which
/// encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                 const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = 0;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = llvm::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}
/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  IP = HoistInsertPosition(IP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  return IP;
}
/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Determine an insert position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);
  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }
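  // (Each flush expands the partial sum and re-wraps the result as an opaque
  // SCEVUnknown; subsequent expansion then treats it as a black box, so the
  // adds already emitted at IP cannot be hoisted away from the insert
  // position chosen above.)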
  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
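      // For example (illustrative IR), instead of expanding the negation:
      //   %neg = mul i64 %iv, -1
      //   %sum = add i64 %n, %neg
      //   %c   = icmp eq i64 %sum, 0
      // the scaled register can simply become the icmp's other operand:
      //   %c   = icmp eq i64 %n, %iv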
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getConstant(ScaledS->getType(), F.AM.Scale));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }
  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }
  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
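      // For example (illustrative IR), instead of
      //   %sum = add i64 %x, 4
      //   %c   = icmp eq i64 %sum, 0
      // fold the immediate into the comparison:
      //   %c   = icmp eq i64 %x, -4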
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }
  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}
/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
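/// For example (illustrative IR), given
///   %p = phi i64 [ %v, %pred1 ], [ %v, %pred2 ]
/// each use of %v effectively occurs at the end of the corresponding
/// predecessor, so a replacement for %v must be expanded at the end of both
/// %pred1 and %pred2; the Inserted map below reuses one expansion when
/// multiple incoming edges share a predecessor block.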
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator())) {
        Loop *PNLoop = LI.getLoopFor(PN->getParent());
        if (!PNLoop || PN->getParent() != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

          // If PN is outside of the loop and BB is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after BB.
          if (L->contains(BB) && !L->contains(PN))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          BB = NewBB;
          i = PN->getBasicBlockIndex(BB);
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}
/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}
/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}
LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    LI(P->getAnalysis<LoopInfo>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // Start collecting data and preparing for the solver.
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}
void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}
void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}
void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {
class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}
char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}
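// Targets typically schedule this pass themselves, e.g. LLVMTargetMachine
// adds createLoopStrengthReducePass(getTargetLowering()) when building its
// codegen pipeline.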
LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(ID), TLI(tli) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}
void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}