//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable; it
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <numeric>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "loop-reduce"
/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

/// Limit the size of expression that SCEV-based salvaging will attempt to
/// translate into a DIExpression.
/// Choose a maximum size such that debuginfo is not excessively increased and
/// the salvaging is not too expensive for the compiler.
static const unsigned MaxSCEVSalvageExpressionSize = 64;
// Cleanup congruent phis after LSR phi expansion.
static cl::opt<bool> EnablePhiElim(
    "enable-lsr-phielim", cl::Hidden, cl::init(true),
    cl::desc("Enable LSR phi elimination"));

// The flag adds instruction count to solutions cost comparison.
static cl::opt<bool> InsnsCost(
    "lsr-insns-cost", cl::Hidden, cl::init(true),
    cl::desc("Add instruction count to a LSR cost model"));

// Flag to choose how to narrow complex LSR solutions.
static cl::opt<bool> LSRExpNarrow(
    "lsr-exp-narrow", cl::Hidden, cl::init(false),
    cl::desc("Narrow LSR complex solution using"
             " expectation of registers number"));

// Flag to narrow search space by filtering non-optimal formulae with
// the same ScaledReg and Scale.
static cl::opt<bool> FilterSameScaledReg(
    "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

static cl::opt<TTI::AddressingModeKind> PreferredAddresingMode(
    "lsr-preferred-addressing-mode", cl::Hidden, cl::init(TTI::AMK_None),
    cl::desc("A flag that overrides the target's preferred addressing mode."),
    cl::values(clEnumValN(TTI::AMK_None,
                          "none",
                          "Don't prefer any addressing mode"),
               clEnumValN(TTI::AMK_PreIndexed,
                          "preindexed",
                          "Prefer pre-indexed addressing mode"),
               clEnumValN(TTI::AMK_PostIndexed,
                          "postindexed",
                          "Prefer post-indexed addressing mode")));

static cl::opt<unsigned> ComplexityLimit(
    "lsr-complexity-limit", cl::Hidden,
    cl::init(std::numeric_limits<uint16_t>::max()),
    cl::desc("LSR search space complexity limit"));

static cl::opt<unsigned> SetupCostDepthLimit(
    "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7),
    cl::desc("The limit on recursion depth for LSRs setup cost"));

static cl::opt<cl::boolOrDefault> AllowTerminatingConditionFoldingAfterLSR(
    "lsr-term-fold", cl::Hidden,
    cl::desc("Attempt to replace primary IV with other IV."));

static cl::opt<bool> AllowDropSolutionIfLessProfitable(
    "lsr-drop-solution", cl::Hidden, cl::init(false),
    cl::desc("Attempt to drop solution if it is less profitable"));

STATISTIC(NumTermFold,
          "Number of terminating condition folds recognized and performed");

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
    "stress-ivchain", cl::Hidden, cl::init(false),
    cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif
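// Note (added commentary, not in the original source): these knobs are
// ordinary cl::opt flags, so they can be toggled on the opt/llc command line.
// For example, passing -lsr-insns-cost=false disables the instruction-count
// term that Cost::isLess consults below, and -lsr-complexity-limit bounds how
// large the formula search space may grow before LSR starts pruning it.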
namespace {

struct MemAccessTy {
  /// Used in situations where the accessed memory type is unknown.
  static const unsigned UnknownAddressSpace =
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;
  unsigned AddrSpace = UnknownAddressSpace;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

  bool operator==(MemAccessTy Other) const {
    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
  }

  bool operator!=(MemAccessTy Other) const { return !(*this == Other); }

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
  }

  Type *getType() { return MemTy; }
};

/// This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

LLVM_DUMP_METHOD void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif
namespace {

/// Map register candidates to information about how they are used.
class RegUseTracker {
  using RegUsesTy = DenseMap<const SCEV *, RegSortData>;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void countRegister(const SCEV *Reg, size_t LUIdx);
  void dropRegister(const SCEV *Reg, size_t LUIdx);
  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  using iterator = SmallVectorImpl<const SCEV *>::iterator;
  using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;

  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

} // end anonymous namespace
void
RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
      RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}
void
RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}
void
RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (auto &Pair : RegUsesMap) {
    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
          LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}
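// Example (added commentary, not in the original source): if LUIdx is 2 and
// LastLUIdx is 5, every register's bit 2 is overwritten with the old value of
// bit 5 and the bit vector is truncated to 5 entries. This mirrors the way the
// caller deletes an LSRUse by swapping the last use into its slot and popping
// the list.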
bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}
const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}
namespace {

/// This class holds information that describes a formula for computing a value
/// that satisfies a use. It may include broken-out immediates and scaled
/// registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV = nullptr;

  /// Base offset for complex addressing.
  int64_t BaseOffset = 0;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg = false;

  /// The scale of any complex addressing.
  int64_t Scale = 0;

  /// The list of "base" registers for this use. When this is non-empty, the
  /// canonical representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// 3. The reg containing the recurrent expr related to the current loop in
  ///    the formula should be put in the ScaledReg.
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// #3 ensures invariant regs with respect to the current loop can be combined
  /// together in LSR codegen.
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
  SmallVector<const SCEV *, 4> BaseRegs;

  /// The 'scaled' register for this use. This should be non-null when Scale is
  /// not zero.
  const SCEV *ScaledReg = nullptr;

  /// An additional constant offset which is added near the use. This requires a
  /// temporary register, but the offset itself can live in an add immediate
  /// field rather than a register.
  int64_t UnfoldedOffset = 0;

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical(const Loop &L) const;

  void canonicalize(const Loop &L);

  bool unscale();

  bool hasZeroEnd() const;

  size_t getNumRegs() const;
  Type *getType() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace
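// Illustrative example (added commentary, not in the original source): for a
// use whose address is GV + 4 + %inv + 2 * {0,+,1}<%loop>, a canonical Formula
// would carry BaseGV = GV, BaseOffset = 4, BaseRegs = { %inv }, Scale = 2 and
// ScaledReg = {0,+,1}<%loop>, so the loop-dependent addrec lives in ScaledReg
// as invariant #3 above requires.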
/// Recursion helper for initialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands())
      DoInitialMatch(S, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero() && AR->isAffine()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(drop_begin(Mul->operands()));
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
          SE.getEffectiveSCEVType(NewMul->getType())));
      for (const SCEV *S : MyGood)
        Good.push_back(SE.getMulExpr(NegOne, S));
      for (const SCEV *S : MyBad)
        Bad.push_back(SE.getMulExpr(NegOne, S));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}
/// Incorporate loop-variant parts of S into this Formula, attempting to keep
/// all loop-invariant and loop-computable values in a single base register.
void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  canonicalize(*L);
}
static bool containsAddRecDependentOnLoop(const SCEV *S, const Loop &L) {
  return SCEVExprContains(S, [&L](const SCEV *S) {
    return isa<SCEVAddRecExpr>(S) && (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
  });
}
/// Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
bool Formula::isCanonical(const Loop &L) const {
  if (!ScaledReg)
    return BaseRegs.size() <= 1;

  if (Scale != 1)
    return true;

  if (Scale == 1 && BaseRegs.empty())
    return false;

  if (containsAddRecDependentOnLoop(ScaledReg, L))
    return true;

  // If ScaledReg is not a recurrent expr, or it is but its loop is not the
  // current loop, while BaseRegs contains a recurrent expr reg related to the
  // current loop, we want to swap the reg in BaseRegs with ScaledReg.
  return none_of(BaseRegs, [&L](const SCEV *S) {
    return containsAddRecDependentOnLoop(S, L);
  });
}
/// Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
/// Every formula having more than one base register must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
/// On the other hand, 1*reg should be canonicalized into reg.
void Formula::canonicalize(const Loop &L) {
  if (isCanonical(L))
    return;

  if (BaseRegs.empty()) {
    // No base reg? Use scale reg with scale = 1 as such.
    assert(ScaledReg && "Expected 1*reg => reg");
    assert(Scale == 1 && "Expected 1*reg => reg");
    BaseRegs.push_back(ScaledReg);
    Scale = 0;
    ScaledReg = nullptr;
    return;
  }

  // Keep the invariant sum in BaseRegs and one of the variant sums in
  // ScaledReg.
  if (!ScaledReg) {
    ScaledReg = BaseRegs.pop_back_val();
    Scale = 1;
  }

  // If ScaledReg is an invariant with respect to L, find the reg from
  // BaseRegs containing the recurrent expr related with Loop L. Swap the
  // reg with ScaledReg.
  if (!containsAddRecDependentOnLoop(ScaledReg, L)) {
    auto I = find_if(BaseRegs, [&L](const SCEV *S) {
      return containsAddRecDependentOnLoop(S, L);
    });
    if (I != BaseRegs.end())
      std::swap(ScaledReg, *I);
  }
  assert(isCanonical(L) && "Failed to canonicalize?");
}
/// Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}
bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
    return false;
  if (BaseRegs.size() != 1 || ScaledReg)
    return false;
  return true;
}
/// Return the total number of register operands used by this formula. This does
/// not include register uses implied by non-constant addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// Return the type of this formula, if it has one, or null otherwise. This type
/// is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}
/// Delete the given base reg from the BaseRegs list.
void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}
/// Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg || is_contained(BaseRegs, S);
}
/// Test whether this formula uses registers which are used by uses other than
/// the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (const SCEV *BaseReg : BaseRegs)
    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
      return true;
  return false;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (const SCEV *BaseReg : BaseRegs) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << *BaseReg << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

LLVM_DUMP_METHOD void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif
/// Return true if the given addrec can be sign-extended without changing its
/// value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// Return true if the given add can be sign-extended without changing its
/// value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// Return true if the given mul can be sign-extended without changing its
/// value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
      IntegerType::get(SE.getContext(),
                       SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
/// Return an expression for LHS /s RHS, if it can be determined and if the
/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
/// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
/// the multiplication may overflow, which is useful when the result will be
/// used in a context where the most significant bits are ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getAPInt();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnes()) {
      if (LHS->getType()->isPointerTy())
        return nullptr;
      return SE.getMulExpr(LHS, RC);
    }
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return nullptr;
    const APInt &LA = C->getAPInt();
    const APInt &RA = RC->getAPInt();
    if (LA.srem(RA) != 0)
      return nullptr;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return nullptr;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return nullptr;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return nullptr;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (const SCEV *S : Add->operands()) {
        const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
        if (!Op) return nullptr;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return nullptr;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      // Handle special case C1*X*Y /s C2*X*Y.
      if (const SCEVMulExpr *MulRHS = dyn_cast<SCEVMulExpr>(RHS)) {
        if (IgnoreSignificantBits || isMulSExtable(MulRHS, SE)) {
          const SCEVConstant *LC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
          const SCEVConstant *RC =
              dyn_cast<SCEVConstant>(MulRHS->getOperand(0));
          if (LC && RC) {
            SmallVector<const SCEV *, 4> LOps(drop_begin(Mul->operands()));
            SmallVector<const SCEV *, 4> ROps(drop_begin(MulRHS->operands()));
            if (LOps == ROps)
              return getExactSDiv(LC, RC, SE, IgnoreSignificantBits);
          }
        }
      }

      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (const SCEV *S : Mul->operands()) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : nullptr;
    }
    return nullptr;
  }

  // Otherwise we don't know.
  return nullptr;
}
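// Examples (added commentary, not in the original source):
//   getExactSDiv({0,+,4}<L>, 4, SE)  ==> {0,+,1}<L>  (distributed over the addrec)
//   getExactSDiv(6, 4, SE)           ==> nullptr     (non-zero remainder)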
/// If S involves the addition of a constant integer value, return that integer
/// value, and mutate S to point to a new SCEV with that value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getAPInt().getSignificantBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
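// Example (added commentary, not in the original source): for S = (16 + %base),
// ExtractImmediate returns 16 and rewrites S to the remaining SCEV (%base);
// for an expression with no constant addend it returns 0 and leaves S alone.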
/// If S involves the addition of a GlobalValue address, return that symbol, and
/// mutate S to point to a new SCEV with that value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->operands());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return nullptr;
}
/// Returns true if the specified instruction is using the specified value as an
/// address.
static bool isAddressUse(const TargetTransformInfo &TTI,
                         Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::prefetch:
    case Intrinsic::masked_load:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::masked_store:
      if (II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (II->getArgOperand(0) == OperandVal ||
          II->getArgOperand(1) == OperandVal)
        isAddress = true;
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
        if (IntrInfo.PtrVal == OperandVal)
          isAddress = true;
      }
    }
    }
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    if (RMW->getPointerOperand() == OperandVal)
      isAddress = true;
  } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    if (CmpX->getPointerOperand() == OperandVal)
      isAddress = true;
  }
  return isAddress;
}
/// Return the type of the memory being accessed.
static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
                                 Instruction *Inst, Value *OperandVal) {
  MemAccessTy AccessTy = MemAccessTy::getUnknown(Inst->getContext());

  // First get the type of memory being accessed.
  if (Type *Ty = Inst->getAccessType())
    AccessTy.MemTy = Ty;

  // Then get the pointer address space.
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    AccessTy.AddrSpace = SI->getPointerAddressSpace();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    AccessTy.AddrSpace = LI->getPointerAddressSpace();
  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
    AccessTy.AddrSpace = RMW->getPointerAddressSpace();
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::prefetch:
    case Intrinsic::memset:
      AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
      break;
    case Intrinsic::masked_load:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      break;
    case Intrinsic::masked_store:
      AccessTy.AddrSpace =
          II->getArgOperand(1)->getType()->getPointerAddressSpace();
      break;
    default: {
      MemIntrinsicInfo IntrInfo;
      if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
        AccessTy.AddrSpace
            = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
      }
      break;
    }
    }
  }

  return AccessTy;
}
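// Example (added commentary, not in the original source): for
//   store i32 %v, ptr addrspace(3) %p
// getAccessType returns MemTy = i32 and AddrSpace = 3; for an unrecognized
// target intrinsic with no TTI information, MemTy stays the void placeholder
// produced by MemAccessTy::getUnknown.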
/// Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
    if (SE.isSCEVable(PN.getType()) &&
        (SE.getEffectiveSCEVType(PN.getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(&PN) == AR)
      return true;
  }
  return false;
}
/// Check if expanding this expression is likely to incur significant cost. This
/// is tricky because SCEV doesn't track which expressions are actually computed
/// by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV *> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  default:
    break;
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (const SCEV *S : Add->operands()) {
      if (isHighCostExpansion(S, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}
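// Examples (added commentary, not in the original source): an increment such
// as {%start,+,4}<L> whose addrec already exists as a loop phi is considered
// cheap, while an expression containing a UDivExpr falls through to the
// conservative "high cost" answer above.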
namespace {

class LSRUse;

} // end anonymous namespace
/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);

// Get the cost of the scaling factor used in F for LU.
static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
                                            const LSRUse &LU, const Formula &F,
                                            const Loop &L);
namespace {

/// This class is used to measure and compare candidate formulae.
class Cost {
  const Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  TargetTransformInfo::LSRCost C;
  TTI::AddressingModeKind AMK = TTI::AMK_None;

public:
  Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
       TTI::AddressingModeKind AMK) :
    L(L), SE(&SE), TTI(&TTI), AMK(AMK) {
    C.Insns = 0;
    C.NumRegs = 0;
    C.AddRecCost = 0;
    C.NumIVMuls = 0;
    C.NumBaseAdds = 0;
    C.ImmCost = 0;
    C.SetupCost = 0;
    C.ScaleCost = 0;
  }

  bool isLess(const Cost &Other) const;

  void Lose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
             | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
      || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
           & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return C.NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const LSRUse &LU,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const Formula &F, const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs);
  void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs);
};
/// An operand value in an instruction which is to be replaced with some
/// equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// The instruction which will be updated.
  Instruction *UserInst = nullptr;

  /// The operand of the instruction which will be replaced. The operand may be
  /// used more than once; every instance will be replaced.
  Value *OperandValToReplace = nullptr;

  /// If this user is to use the post-incremented value of an induction
  /// variable, this set is non-empty and holds the loops associated with the
  /// induction variable.
  PostIncLoopSet PostIncLoops;

  /// A constant offset to be added to the LSRUse expression. This allows
  /// multiple fixups to share the same LSRUse with different offsets, for
  /// example in an unrolled loop.
  int64_t Offset = 0;

  LSRFixup() = default;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};
/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
/// SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 4> getEmptyKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 4> getTombstoneKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
    return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
  }

  static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
                      const SmallVector<const SCEV *, 4> &RHS) {
    return LHS == RHS;
  }
};
/// This class holds the state that LSR keeps for each use in IVUsers, as well
/// as uses invented by LSR itself. It includes information about what kinds of
/// things can be folded into the user, information about the user itself, and
/// information about how the use may be satisfied. TODO: Represent multiple
/// users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// An enum for a kind of use, indicating what types of scaled and immediate
  /// operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;

  KindType Kind;
  MemAccessTy AccessTy;

  /// The list of operands which are to be replaced.
  SmallVector<LSRFixup, 8> Fixups;

  /// Keep track of the min and max offsets of the fixups.
  int64_t MinOffset = std::numeric_limits<int64_t>::max();
  int64_t MaxOffset = std::numeric_limits<int64_t>::min();

  /// This records whether all of the fixups using this LSRUse are outside of
  /// the loop, in which case some special-case heuristics may be used.
  bool AllFixupsOutsideLoop = true;

  /// RigidFormula is set to true to guarantee that this use will be associated
  /// with a single formula--the one that initially matched. Some SCEV
  /// expressions cannot be expanded. This allows LSR to consider the registers
  /// used by those expressions without the need to expand them later after
  /// changing the formula.
  bool RigidFormula = false;

  /// This records the widest use type for any fixup using this
  /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
  /// fixup widths to be equivalent, because the narrower one may be relying on
  /// the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType = nullptr;

  /// A list of ways to build a value that can satisfy this user. After the
  /// list is populated, one of these is selected heuristically and used to
  /// formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  void pushFixup(LSRFixup &f) {
    Fixups.push_back(f);
    if (f.Offset > MaxOffset)
      MaxOffset = f.Offset;
    if (f.Offset < MinOffset)
      MinOffset = f.Offset;
  }

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);
static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
  if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
    return 1;
  if (Depth == 0)
    return 0;
  if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
    return getSetupCost(S->getStart(), Depth - 1);
  if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg))
    return getSetupCost(S->getOperand(), Depth - 1);
  if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
    return std::accumulate(S->operands().begin(), S->operands().end(), 0,
                           [&](unsigned i, const SCEV *Reg) {
                             return i + getSetupCost(Reg, Depth - 1);
                           });
  if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
    return getSetupCost(S->getLHS(), Depth - 1) +
           getSetupCost(S->getRHS(), Depth - 1);
  return 0;
}
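// Example (added commentary, not in the original source): the setup cost of
// {(%a + %b),+,1}<L> is 2, one unit per leaf SCEVUnknown in the start value
// that would have to be materialized in the preheader; the Depth parameter
// simply caps how deep this recursion may go.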
/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, it should be an invariant
    // with respect to L since L is the innermost loop (at least
    // for now LSR only handles innermost loops).
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
        return;

      // It is bad to allow LSR for current loop to add induction variables
      // for its sibling loops.
      if (!AR->getLoop()->contains(L)) {
        Lose();
        return;
      }

      // Otherwise, it will be an invariant with respect to Loop L.
      ++C.NumRegs;
      return;
    }

    unsigned LoopCost = 1;
    if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
        TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {

      // If the step size matches the base offset, we could use pre-indexed
      // addressing.
      if (AMK == TTI::AMK_PreIndexed) {
        if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
          if (Step->getAPInt() == F.BaseOffset)
            LoopCost = 0;
      } else if (AMK == TTI::AMK_PostIndexed) {
        const SCEV *LoopStep = AR->getStepRecurrence(*SE);
        if (isa<SCEVConstant>(LoopStep)) {
          const SCEV *LoopStart = AR->getStart();
          if (!isa<SCEVConstant>(LoopStart) &&
              SE->isLoopInvariant(LoopStart, L))
            LoopCost = 0;
        }
      }
    }
    C.AddRecCost += LoopCost;

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(F, AR->getOperand(1), Regs);
        if (isLoser())
          return;
      }
    }
  }
  ++C.NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit);
  // Ensure we don't, even with the recursion limit, produce invalid costs.
  C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);

  C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
                 SE->hasComputableLoopEvolution(Reg, L);
}
/// Record this register in the set. If we haven't seen it before, rate
/// it. Optional LoserRegs provides a way to declare any formula that refers to
/// one of those regs an instant loser.
void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(F, Reg, Regs);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}
void Cost::RateFormula(const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (isLoser())
    return;
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // allows to fold 2 registers.
    C.NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
  C.NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  C.ScaleCost += *getScalingFactorCost(*TTI, LU, F, *L).getValue();

  // Tally up the non-zero immediates.
  for (const LSRFixup &Fixup : LU.Fixups) {
    int64_t O = Fixup.Offset;
    int64_t Offset = (uint64_t)O + F.BaseOffset;
    if (F.BaseGV)
      C.ImmCost += 64; // Handle symbolic values conservatively.
                       // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      C.ImmCost += APInt(64, Offset, true).getSignificantBits();

    // Check with target if this offset with this instruction is
    // specifically not supported.
    if (LU.Kind == LSRUse::Address && Offset != 0 &&
        !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                              Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
      C.NumBaseAdds++;
  }

  // If we don't count instruction cost, exit here.
  if (!InsnsCost) {
    assert(isValid() && "invalid cost");
    return;
  }

  // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as
  // an additional instruction (at least fill).
  // TODO: Need to distinguish register classes?
  unsigned TTIRegNum = TTI->getNumberOfRegisters(
                           TTI->getRegisterClassForType(false, F.getType())) - 1;
  if (C.NumRegs > TTIRegNum) {
    // If the cost already exceeded TTIRegNum, then only newly added registers
    // can add new instructions.
    if (PrevNumRegs > TTIRegNum)
      C.Insns += (C.NumRegs - PrevNumRegs);
    else
      C.Insns += (C.NumRegs - TTIRegNum);
  }

  // If an ICmpZero formula does not end in 0, it cannot be replaced by just an
  // add or sub. We'll need to compare the final result of the AddRec.
  // That means we'll need an additional instruction. But if the target can
  // macro-fuse a compare with a branch, don't count this extra instruction.
  // For -10 + {0, +, 1}:
  //   i = i + 1;
  //   cmp i, 10
  //
  // For {-10, +, 1}:
  //   i = i + 1;
  if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&
      !TTI->canMacroFuseCmp())
    C.Insns++;
  // Each new AddRec adds 1 instruction to calculation.
  C.Insns += (C.AddRecCost - PrevAddRecCost);

  // BaseAdds adds instructions for unfolded registers.
  if (LU.Kind != LSRUse::ICmpZero)
    C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
  assert(isValid() && "invalid cost");
}
/// Set this cost to a losing value.
void Cost::Lose() {
  C.Insns = std::numeric_limits<unsigned>::max();
  C.NumRegs = std::numeric_limits<unsigned>::max();
  C.AddRecCost = std::numeric_limits<unsigned>::max();
  C.NumIVMuls = std::numeric_limits<unsigned>::max();
  C.NumBaseAdds = std::numeric_limits<unsigned>::max();
  C.ImmCost = std::numeric_limits<unsigned>::max();
  C.SetupCost = std::numeric_limits<unsigned>::max();
  C.ScaleCost = std::numeric_limits<unsigned>::max();
}
/// Choose the lower cost.
bool Cost::isLess(const Cost &Other) const {
  if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
      C.Insns != Other.C.Insns)
    return C.Insns < Other.C.Insns;
  return TTI->isLSRCostLess(C, Other.C);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Cost::print(raw_ostream &OS) const {
  if (InsnsCost)
    OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
  OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
  if (C.AddRecCost != 0)
    OS << ", with addrec cost " << C.AddRecCost;
  if (C.NumIVMuls != 0)
    OS << ", plus " << C.NumIVMuls << " IV mul"
       << (C.NumIVMuls == 1 ? "" : "s");
  if (C.NumBaseAdds != 0)
    OS << ", plus " << C.NumBaseAdds << " base add"
       << (C.NumBaseAdds == 1 ? "" : "s");
  if (C.ScaleCost != 0)
    OS << ", plus " << C.ScaleCost << " scale cost";
  if (C.ImmCost != 0)
    OS << ", plus " << C.ImmCost << " imm cost";
  if (C.SetupCost != 0)
    OS << ", plus " << C.SetupCost << " setup cost";
}

LLVM_DUMP_METHOD void Cost::dump() const {
  print(errs()); errs() << '\n';
}
#endif
/// Test whether this fixup always uses its value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    UserInst->printAsOperand(OS, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);

  for (const Loop *PIL : PostIncLoops) {
    OS << ", PostIncLoop=";
    PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  }

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

LLVM_DUMP_METHOD void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}
#endif
/// Test whether this use has a formula with the same registers as the given
/// formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 4> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  llvm::sort(Key);
  return Uniquifier.count(Key);
}
/// The function returns a probability of selecting a formula without Reg.
float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
  unsigned FNum = 0;
  for (const Formula &F : Formulae)
    if (F.referencesReg(Reg))
      FNum++;
  return ((float)(Formulae.size() - FNum)) / Formulae.size();
}
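// Example (added commentary, not in the original source): with four formulae
// of which one references Reg, the returned probability is 3/4.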
/// If the given formula has not yet been inserted, add it to the list, and
/// return true. Return false otherwise. The formula must be in canonical form.
bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
  assert(F.isCanonical(L) && "Invalid canonical representation");

  if (!Formulae.empty() && RigidFormula)
    return false;

  SmallVector<const SCEV *, 4> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  llvm::sort(Key);

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (const SCEV *BaseReg : F.BaseRegs)
    assert(!BaseReg->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  if (F.ScaledReg)
    Regs.insert(F.ScaledReg);

  return true;
}
/// Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}
/// Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
  Regs.clear();
  for (const Formula &F : Formulae) {
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (const SCEV *S : OldRegs)
    if (!Regs.count(S))
      RegUses.dropRegister(S, LUIdx);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy.MemTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy.MemTy;

    OS << " in addrspace(" << AccessTy.AddrSpace << ')';
  }

  OS << ", Offsets={";
  bool NeedComma = false;
  for (const LSRFixup &Fixup : Fixups) {
    if (NeedComma) OS << ',';
    OS << Fixup.Offset;
    NeedComma = true;
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

LLVM_DUMP_METHOD void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
#endif
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup /* = nullptr */) {
  switch (Kind) {
  case LSRUse::Address:
    return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
                                     HasBaseReg, Scale, AccessTy.AddrSpace, Fixup);

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (Scale != 0 && HasBaseReg && BaseOffset != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (Scale != 0 && Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (BaseOffset != 0) {
      // We have one of:
      // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
      // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
      // Offs is the ICmp immediate.
      if (Scale == 0)
        // The cast does the right thing with
        // std::numeric_limits<int64_t>::min().
        BaseOffset = -(uint64_t)BaseOffset;
      return TTI.isLegalICmpImmediate(BaseOffset);
    }

    // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !BaseGV && Scale == 0 && BaseOffset == 0;

  case LSRUse::Special:
    // Special case Basic to handle -1 scales.
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}
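// Example (added commentary, not in the original source): an ICmpZero use with
// BaseReg + (-5), no scale and no GV folds when the target accepts 5 as an
// icmp immediate, because "BaseReg - 5 == 0" can be rewritten as
// "icmp eq BaseReg, 5".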
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale) {
  // Check for overflow.
  if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
      (MinOffset > 0))
    return false;
  MinOffset = (uint64_t)BaseOffset + MinOffset;
  if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
      (MaxOffset > 0))
    return false;
  MaxOffset = (uint64_t)BaseOffset + MaxOffset;

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
                              HasBaseReg, Scale) &&
         isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
                              HasBaseReg, Scale);
}
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 const Formula &F, const Loop &L) {
  // For the purpose of isAMCompletelyFolded either having a canonical formula
  // or a scale not equal to zero is correct.
  // Problems may arise from non canonical formulae having a scale == 0.
  // Strictly speaking it would be best to just rely on canonical formulae.
  // However, when we generate the scaled formulae, we first check that the
  // scaling factor is profitable before computing the actual ScaledReg for
  // compile time's sake.
  assert((F.isCanonical(L) || F.Scale != 0));
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                              F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
}
/// Test whether we know how to expand the current formula.
static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind,
                       MemAccessTy AccessTy, GlobalValue *BaseGV,
                       int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
  // We know how to expand completely foldable formulae.
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale) ||
         // Or formulae that use a base register produced by a sum of base
         // registers. In that case, use one of the base registers as the base
         // register explicitly and the rest implicitly.
         (Scale == 1 &&
          isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                               BaseGV, BaseOffset, true, 0));
}
static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind,
                       MemAccessTy AccessTy, const Formula &F) {
  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
                    F.BaseOffset, F.HasBaseReg, F.Scale);
}
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F) {
  // Target may want to look at the user instructions.
  if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
    for (const LSRFixup &Fixup : LU.Fixups)
      if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
                                (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
                                F.Scale, Fixup.UserInst))
        return false;
    return true;
  }

  return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                              LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
                              F.Scale);
}
static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI,
                                            const LSRUse &LU, const Formula &F,
                                            const Loop &L) {
  if (!F.Scale)
    return 0;

  // If the use is not completely folded in that instruction, we will have to
  // pay an extra cost only for scale != 1.
  if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                            LU.AccessTy, F, L))
    return F.Scale != 1;

  switch (LU.Kind) {
  case LSRUse::Address: {
    // Check the scaling factor cost with both the min and max offsets.
    InstructionCost ScaleCostMinOffset = TTI.getScalingFactorCost(
        LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg,
        F.Scale, LU.AccessTy.AddrSpace);
    InstructionCost ScaleCostMaxOffset = TTI.getScalingFactorCost(
        LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg,
        F.Scale, LU.AccessTy.AddrSpace);

    assert(ScaleCostMinOffset.isValid() && ScaleCostMaxOffset.isValid() &&
           "Legal addressing mode has an illegal cost!");
    return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
  }
  case LSRUse::ICmpZero:
  case LSRUse::Basic:
  case LSRUse::Special:
    // The use is completely folded, i.e., everything is folded into the
    // instruction.
    return 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}
static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             LSRUse::KindType Kind, MemAccessTy AccessTy,
                             GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!HasBaseReg && Scale == 1) {
    Scale = 0;
    HasBaseReg = true;
  }

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
                              HasBaseReg, Scale);
}
static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             ScalarEvolution &SE, int64_t MinOffset,
                             int64_t MaxOffset, LSRUse::KindType Kind,
                             MemAccessTy AccessTy, const SCEV *S,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffset = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale);
}
/// An individual increment in a Chain of IV increments. Relate an IV user to
/// an expression that computes the IV it uses from the IV used by the previous
/// link in the Chain.
///
/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
/// original IVOperand. The head of the chain's IVOperand is only valid during
/// chain collection, before LSR replaces IV users. During chain generation,
/// IncExpr can be used to find the new IVOperand that computes the same
/// expression.
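///
/// As a rough illustration (hypothetical code, not from a particular test):
///
///   %p0 = gep %base, %iv          ; chain head, IVOperand = %iv
///   %p1 = gep %base, %iv.plus.4   ; IncExpr = 4 relative to the head
///   %p2 = gep %base, %iv.plus.8   ; IncExpr = 4 relative to %p1
///
/// Each link records only the loop-invariant step from the previous link, so a
/// later user can be materialized from the previous user's value instead of
/// keeping every scaled IV expression live.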
struct IVInc {
  Instruction *UserInst;
  Value *IVOperand;
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E)
      : UserInst(U), IVOperand(O), IncExpr(E) {}
};

// The list of IV increments in program order. We typically add the head of a
// chain without finding subsequent links.
struct IVChain {
  SmallVector<IVInc, 1> Incs;
  const SCEV *ExprBase = nullptr;

  IVChain() = default;
  IVChain(const IVInc &Head, const SCEV *Base)
      : Incs(1, Head), ExprBase(Base) {}

  using const_iterator = SmallVectorImpl<IVInc>::const_iterator;

  // Return the first increment in the chain.
  const_iterator begin() const {
    assert(!Incs.empty());
    return std::next(Incs.begin());
  }
  const_iterator end() const {
    return Incs.end();
  }

  // Returns true if this chain contains any increments.
  bool hasIncs() const { return Incs.size() >= 2; }

  // Add an IVInc to the end of this chain.
  void add(const IVInc &X) { Incs.push_back(X); }

  // Returns the last UserInst in the chain.
  Instruction *tailUserInst() const { return Incs.back().UserInst; }

  // Returns true if IncExpr can be profitably added to this chain.
  bool isProfitableIncrement(const SCEV *OperExpr,
                             const SCEV *IncExpr,
                             ScalarEvolution &SE);
};
/// Helper for CollectChains to track multiple IV increment uses. Distinguish
/// between FarUsers that definitely cross IV increments and NearUsers that may
/// be used between IV increments.
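///
/// For instance (illustrative only): if a chain increments the IV between two
/// of its loads, another use of the pre-increment value that appears after the
/// increment definitely keeps the old value live across it (a FarUser), while
/// a use sitting between the chain head and the next increment may not
/// (a NearUser).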
struct ChainUsers {
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;
};
/// This class holds state for the main loop strength reduction logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  Loop *const L;
  MemorySSAUpdater *MSSAU;
  TTI::AddressingModeKind AMK;
  mutable SCEVExpander Rewriter;
  bool Changed = false;

  /// This is the insert position that the current loop's induction variable
  /// increment should be placed. In simple loops, this is the latch block's
  /// terminator. But in more complicated cases, this is a position which will
  /// dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos = nullptr;

  /// Interesting factors between use strides.
  ///
  /// We explicitly use a SetVector which contains a SmallSet, instead of the
  /// default, a SmallDenseSet, because we need to use the full range of
  /// int64_ts, and there's currently no good way of doing that with
  /// SmallDenseSet.
  SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;

  /// The cost of the current SCEV, the best solution by LSR will be dropped if
  /// the solution is not profitable.
  Cost BaselineCost;

  /// Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// The list of interesting uses.
  mutable SmallVector<LSRUse, 16> Uses;

  /// Track which uses use which register candidates.
  RegUseTracker RegUses;

  // Limit the number of chains to avoid quadratic behavior. We don't expect to
  // have more than a few IV increment chains in a loop. Missing a Chain falls
  // back to normal LSR behavior for those uses.
  static const unsigned MaxChains = 8;

  /// IV users can form a chain of IV increments.
  SmallVector<IVChain, MaxChains> IVChainVec;

  /// IV users that belong to profitable IVChains.
  SmallPtrSet<Use*, MaxChains> IVIncSet;

  /// Induction variables that were generated and inserted by the SCEV Expander.
  SmallVector<llvm::WeakVH, 2> ScalarEvolutionIVs;
  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain,
                       SmallVectorImpl<WeakTrackingVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  // Support for sharing of LSRUses between LSRFixups.
  using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, MemAccessTy AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
                                    MemAccessTy AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);

  void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                  const Formula &Base, unsigned Depth,
                                  size_t Idx, bool IsScaledReg = false);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base, size_t Idx,
                                   bool IsScaledReg = false);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base,
                                   const SmallVectorImpl<int64_t> &Worklist,
                                   size_t Idx, bool IsScaledReg = false);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  void NarrowSearchSpaceByFilterPostInc();
  void NarrowSearchSpaceByDeletingCostlyFormulas();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
  HoistInsertPosition(BasicBlock::iterator IP,
                      const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                                     const LSRFixup &LF,
                                                     const LSRUse &LU) const;

  Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
                BasicBlock::iterator IP,
                SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
                     const Formula &F,
                     SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
               SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);

public:
  LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
              LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC,
              TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU);

  bool getChanged() const { return Changed; }
  const SmallVectorImpl<WeakVH> &getScalarEvolutionIVs() const {
    return ScalarEvolutionIVs;
  }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace
/// If IV is used in an int-to-float cast inside the loop then try to eliminate
/// the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    Type *DestTy = nullptr;
    bool IsSigned = false;

    /* If shadow use is a int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
      IsSigned = false;
      DestTy = UCast->getDestTy();
    }
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
      IsSigned = true;
      DestTy = SCast->getDestTy();
    }
    if (!DestTy) continue;

    // If target does not support DestTy natively then do not apply
    // this transformation.
    if (!TTI.isTypeLegal(DestTy)) continue;

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    // If the calculation in integers overflows, the result in FP type will
    // differ. So we only can do this transformation if we are guaranteed to not
    // deal with overflowing values
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
    if (!AR) continue;
    if (IsSigned && !AR->hasNoSignedWrap()) continue;
    if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;

    Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
                                        (double)Init->getSExtValue() :
                                        (double)Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = nullptr;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}
/// If Cond has an operand that is an expression of an IV, set the IV user and
/// stride information and return true, otherwise return false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVStrideUse &U : IU)
    if (U.getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = &U;
      return true;
    }
  return false;
}
/// Rewrite the loop's terminating condition if it uses a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = nullptr;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = nullptr;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
        if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
          NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
        if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
          NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  NewCond->setDebugLoc(Cond->getDebugLoc());
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}
/// Change loop terminating condition to use the postinc iv when possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  // We need a different set of heuristics for rotated and non-rotated loops.
  // If a loop is rotated then the latch is also the backedge, so inserting
  // post-inc expressions just before the latch is ideal. To reduce live ranges
  // it also makes sense to rewrite terminating conditions to use post-inc
  // values.
  //
  // If the loop is not rotated then the latch is not a backedge; the latch
  // check is done in the loop head. Adding post-inc expressions before the
  // latch will cause overlapping live-ranges of pre-inc and post-inc expressions
  // in the loop body. In this case we do *not* want to use post-inc expressions
  // in the latch check, and we want to insert post-inc expressions before
  // the backedge.
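  //
  // A rough sketch of the two shapes (hypothetical CFGs, for orientation only):
  //   rotated:     preheader -> { body ... latch: cmp, br (backedge + exit) }
  //                the latch is the backedge, so a post-inc compare lives only
  //                in the latch and the pre-inc value can die earlier.
  //   non-rotated: header: cmp, br (exit) -> body -> latch: br (backedge)
  //                the exit test runs on the pre-inc value, so post-inc values
  //                in the latch would overlap with pre-inc values in the body.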
  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (!llvm::is_contained(ExitingBlocks, LatchBlock)) {
    // The backedge doesn't exit the loop; treat this as a head-tested loop.
    IVIncInsertPos = LatchBlock->getTerminator();
    return;
  }

  // Otherwise treat this as a rotated loop.
  for (BasicBlock *ExitingBlock : ExitingBlocks) {
    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc..
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = nullptr;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isMinusOne())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getSignificantBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
              MemAccessTy AccessTy = getAccessType(
                  TTI, UI->getUser(), UI->getOperandValToReplace());
              int64_t Scale = C->getSExtValue();
              if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
                                            /*BaseOffset=*/0,
                                            /*HasBaseReg=*/true, Scale,
                                            AccessTy.AddrSpace))
                goto decline_post_inc;
              Scale = -Scale;
              if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
                                            /*BaseOffset=*/0,
                                            /*HasBaseReg=*/true, Scale,
                                            AccessTy.AddrSpace))
                goto decline_post_inc;
            }
          }
        }

    LLVM_DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                      << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (Cond->getNextNonDebugInstruction() != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        Cond->insertInto(ExitingBlock, TermBr->getIterator());

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (Instruction *Inst : PostIncs)
    IVIncInsertPos = DT.findNearestCommonDominator(IVIncInsertPos, Inst);
}
/// Determine if the given use can accommodate a fixup at the given offset and
/// other details. If so, update the use and return true.
bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                                     bool HasBaseReg, LSRUse::KindType Kind,
                                     MemAccessTy AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  MemAccessTy NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;

  // Check for a mismatched access type, and fall back conservatively as needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address) {
    if (AccessTy.MemTy != LU.AccessTy.MemTy) {
      NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
                                            AccessTy.AddrSpace);
    }
  }

  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          LU.MaxOffset - NewOffset, HasBaseReg))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          NewOffset - LU.MinOffset, HasBaseReg))
      return false;
    NewMaxOffset = NewOffset;
  }

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  return true;
}
/// Return an LSRUse index and an offset value for a fixup which needs the given
/// expression, with the given kind and optional access type. Either reuse an
/// existing use or create a new one, as needed.
std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
                                               LSRUse::KindType Kind,
                                               MemAccessTy AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
                        Offset, /*HasBaseReg=*/ true)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}
/// Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.swapAndDropUse(LUIdx, Uses.size());
}
/// Look for a use distinct from OrigLU which has a formula that has the same
/// registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (LSRUse &LU : Uses) {
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (const Formula &F : LU.Formulae) {
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.BaseGV == OrigF.BaseGV &&
            F.Scale == OrigF.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.BaseOffset == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return nullptr;
}
void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (const IVStrideUse &U : IU) {
    const SCEV *Expr = IU.getExpr(U);
    if (!Expr)
      continue;

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        if (AR->getLoop() == L)
          Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        append_range(Worklist, Add->operands());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         std::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getAPInt().getSignificantBits() <= 64 && !Factor->isZero())
          Factors.insert(Factor->getAPInt().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getAPInt().getSignificantBits() <= 64 && !Factor->isZero())
          Factors.insert(Factor->getAPInt().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  LLVM_DEBUG(print_factors_and_types(dbgs()));
}
/// Helper for CollectChains that finds an IV operand (computed by an AddRec in
/// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
/// IVStrideUses, we could partially skip this.
static User::op_iterator
findIVOperand(User::op_iterator OI, User::op_iterator OE,
              Loop *L, ScalarEvolution &SE) {
  for(; OI != OE; ++OI) {
    if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
      if (!SE.isSCEVable(Oper->getType()))
        continue;

      if (const SCEVAddRecExpr *AR =
          dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
        if (AR->getLoop() == L)
          break;
      }
    }
  }
  return OI;
}
/// IVChain logic must consistently peek base TruncInst operands, so wrap it in
/// a convenient helper.
static Value *getWideOperand(Value *Oper) {
  if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
    return Trunc->getOperand(0);
  return Oper;
}
/// Return an approximation of this SCEV expression's "base", or NULL for any
/// constant. Returning the expression itself is conservative. Returning a
/// deeper subexpression is more precise and valid as long as it isn't less
/// complex than another subexpression. For expressions involving multiple
/// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
/// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
/// IVInc==b-a.
///
/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
/// SCEVUnknown, we simply return the rightmost SCEV operand.
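///
/// For example (illustrative only): for an add expression such as
///   ((4 * {0,+,1}<%L>) + (sext i32 %n to i64) + %base)
/// the scaled addrec is skipped and the rightmost unscaled operand, the
/// pointer-typed SCEVUnknown %base, is taken as the base.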
static const SCEV *getExprBase(const SCEV *S) {
  switch (S->getSCEVType()) {
  default: // including scUnknown.
    return S;
  case scConstant:
  case scVScale:
    return nullptr;
  case scTruncate:
    return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
  case scZeroExtend:
    return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
  case scSignExtend:
    return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
  case scAddExpr: {
    // Skip over scaled operands (scMulExpr) to follow add operands as long as
    // there's nothing more complex.
    // FIXME: not sure if we want to recognize negation.
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    for (const SCEV *SubExpr : reverse(Add->operands())) {
      if (SubExpr->getSCEVType() == scAddExpr)
        return getExprBase(SubExpr);

      if (SubExpr->getSCEVType() != scMulExpr)
        return SubExpr;
    }
    return S; // all operands are scaled, be conservative.
  }
  case scAddRecExpr:
    return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
  }
  llvm_unreachable("Unknown SCEV kind!");
}
/// Return true if the chain increment is profitable to expand into a loop
/// invariant value, which may require its own register. A profitable chain
/// increment will be an offset relative to the same base. We allow such offsets
/// to potentially be used as chain increment as long as it's not obviously
/// expensive to expand using real instructions.
bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
                                    const SCEV *IncExpr,
                                    ScalarEvolution &SE) {
  // Aggressively form chains when -stress-ivchain.
  if (StressIVChain)
    return true;

  // Do not replace a constant offset from IV head with a nonconstant IV
  // increment.
  if (!isa<SCEVConstant>(IncExpr)) {
    const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
    if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
      return false;
  }

  SmallPtrSet<const SCEV*, 8> Processed;
  return !isHighCostExpansion(IncExpr, Processed, SE);
}
/// Return true if the number of registers needed for the chain is estimated to
/// be less than the number required for the individual IV users. First prohibit
/// any IV users that keep the IV live across increments (the Users set should
/// be empty). Next count the number and type of increments in the chain.
///
/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
/// effectively use postinc addressing modes. Only consider it profitable if the
/// increments can be computed in fewer registers when chained.
///
/// TODO: Consider IVInc free if it's already used in other chains.
static bool isProfitableChain(IVChain &Chain,
                              SmallPtrSetImpl<Instruction *> &Users,
                              ScalarEvolution &SE,
                              const TargetTransformInfo &TTI) {
  if (StressIVChain)
    return true;

  if (!Chain.hasIncs())
    return false;

  if (!Users.empty()) {
    LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
               for (Instruction *Inst
                    : Users) { dbgs() << "  " << *Inst << "\n"; });
    return false;
  }
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");

  // The chain itself may require a register, so initialize cost to 1.
  int cost = 1;

  // A complete chain likely eliminates the need for keeping the original IV in
  // a register. LSR does not currently know how to form a complete chain unless
  // the header phi already exists.
  if (isa<PHINode>(Chain.tailUserInst())
      && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
    --cost;
  }
  const SCEV *LastIncExpr = nullptr;
  unsigned NumConstIncrements = 0;
  unsigned NumVarIncrements = 0;
  unsigned NumReusedIncrements = 0;

  if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst))
    return true;

  for (const IVInc &Inc : Chain) {
    if (TTI.isProfitableLSRChainElement(Inc.UserInst))
      return true;
    if (Inc.IncExpr->isZero())
      continue;

    // Incrementing by zero or some constant is neutral. We assume constants can
    // be folded into an addressing mode or an add's immediate operand.
    if (isa<SCEVConstant>(Inc.IncExpr)) {
      ++NumConstIncrements;
      continue;
    }

    if (Inc.IncExpr == LastIncExpr)
      ++NumReusedIncrements;
    else
      ++NumVarIncrements;

    LastIncExpr = Inc.IncExpr;
  }
  // An IV chain with a single increment is handled by LSR's postinc
  // uses. However, a chain with multiple increments requires keeping the IV's
  // value live longer than it needs to be if chained.
  if (NumConstIncrements > 1)
    --cost;

  // Materializing increment expressions in the preheader that didn't exist in
  // the original code may cost a register. For example, sign-extended array
  // indices can produce ridiculous increments like this:
  // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
  cost += NumVarIncrements;

  // Reusing variable increments likely saves a register to hold the multiple of
  // the stride.
  cost -= NumReusedIncrements;

  LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
                    << "\n");

  return cost < 0;
}
/// Add this IV user to an existing chain or make it the head of a new chain.
void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                                   SmallVectorImpl<ChainUsers> &ChainUsersVec) {
  // When IVs are used as types of varying widths, they are generally converted
  // to a wider type with some uses remaining narrow under a (free) trunc.
  Value *const NextIV = getWideOperand(IVOper);
  const SCEV *const OperExpr = SE.getSCEV(NextIV);
  const SCEV *const OperExprBase = getExprBase(OperExpr);

  // Visit all existing chains. Check if its IVOper can be computed as a
  // profitable loop invariant increment from the last link in the Chain.
  unsigned ChainIdx = 0, NChains = IVChainVec.size();
  const SCEV *LastIncExpr = nullptr;
  for (; ChainIdx < NChains; ++ChainIdx) {
    IVChain &Chain = IVChainVec[ChainIdx];

    // Prune the solution space aggressively by checking that both IV operands
    // are expressions that operate on the same unscaled SCEVUnknown. This
    // "base" will be canceled by the subsequent getMinusSCEV call. Checking
    // first avoids creating extra SCEV expressions.
    if (!StressIVChain && Chain.ExprBase != OperExprBase)
      continue;

    Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
    if (PrevIV->getType() != NextIV->getType())
      continue;

    // A phi node terminates a chain.
    if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
      continue;

    // The increment must be loop-invariant so it can be kept in a register.
    const SCEV *PrevExpr = SE.getSCEV(PrevIV);
    const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
    if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L))
      continue;

    if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
      LastIncExpr = IncExpr;
      break;
    }
  }
  // If we haven't found a chain, create a new one, unless we hit the max. Don't
  // bother for phi nodes, because they must be last in the chain.
  if (ChainIdx == NChains) {
    if (isa<PHINode>(UserInst))
      return;
    if (NChains >= MaxChains && !StressIVChain) {
      LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
      return;
    }
    LastIncExpr = OperExpr;
    // IVUsers may have skipped over sign/zero extensions. We don't currently
    // attempt to form chains involving extensions unless they can be hoisted
    // into this loop's AddRec.
    if (!isa<SCEVAddRecExpr>(LastIncExpr))
      return;
    ++NChains;
    IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
                                 OperExprBase));
    ChainUsersVec.resize(NChains);
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
                      << ") IV=" << *LastIncExpr << "\n");
  } else {
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
                      << ") IV+" << *LastIncExpr << "\n");
    // Add this IV user to the end of the chain.
    IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
  }
  IVChain &Chain = IVChainVec[ChainIdx];

  SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
  // This chain's NearUsers become FarUsers.
  if (!LastIncExpr->isZero()) {
    ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
                                            NearUsers.end());
    NearUsers.clear();
  }

  // All other uses of IVOperand become near uses of the chain.
  // We currently ignore intermediate values within SCEV expressions, assuming
  // they will eventually be used by the current chain, or can be computed
  // from one of the chain increments. To be more precise we could
  // transitively follow its user and only add leaf IV users to the set.
  for (User *U : IVOper->users()) {
    Instruction *OtherUse = dyn_cast<Instruction>(U);
    if (!OtherUse)
      continue;
    // Uses in the chain will no longer be uses if the chain is formed.
    // Include the head of the chain in this iteration (not Chain.begin()).
    IVChain::const_iterator IncIter = Chain.Incs.begin();
    IVChain::const_iterator IncEnd = Chain.Incs.end();
    for( ; IncIter != IncEnd; ++IncIter) {
      if (IncIter->UserInst == OtherUse)
        break;
    }
    if (IncIter != IncEnd)
      continue;

    if (SE.isSCEVable(OtherUse->getType())
        && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
        && IU.isIVUserOrOperand(OtherUse)) {
      continue;
    }
    NearUsers.insert(OtherUse);
  }

  // Since this user is part of the chain, it's no longer considered a use
  // of the chain.
  ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
}
/// Populate the vector of Chains.
///
/// This decreases ILP at the architecture level. Targets with ample registers,
/// multiple memory ports, and no register renaming probably don't want
/// this. However, such targets should probably disable LSR altogether.
///
/// The job of LSR is to make a reasonable choice of induction variables across
/// the loop. Subsequent passes can easily "unchain" computation exposing more
/// ILP *within the loop* if the target wants it.
///
/// Finding the best IV chain is potentially a scheduling problem. Since LSR
/// will not reorder memory operations, it will recognize this as a chain, but
/// will generate redundant IV increments. Ideally this would be corrected later
/// by a smart scheduler:
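///
/// An illustrative sketch (hypothetical pseudo-code, standing in for the
/// original example): a chained pair of accesses may come out as
///   use(iv); iv += step; use(iv); iv += step;
/// where a scheduler, or a target with post-increment addressing, could hide
/// or fold the intermediate adds that the chain introduces.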
///
/// TODO: Walk the entire domtree within this loop, not just the path to the
/// loop latch. This will discover chains on side paths, but requires
/// maintaining multiple copies of the Chains state.
void LSRInstance::CollectChains() {
  LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
  SmallVector<ChainUsers, 8> ChainUsersVec;

  SmallVector<BasicBlock *,8> LatchPath;
  BasicBlock *LoopHeader = L->getHeader();
  for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
       Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
    LatchPath.push_back(Rung->getBlock());
  }
  LatchPath.push_back(LoopHeader);

  // Walk the instruction stream from the loop header to the loop latch.
  for (BasicBlock *BB : reverse(LatchPath)) {
    for (Instruction &I : *BB) {
      // Skip instructions that weren't seen by IVUsers analysis.
      if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
        continue;

      // Ignore users that are part of a SCEV expression. This way we only
      // consider leaf IV Users. This effectively rediscovers a portion of
      // IVUsers analysis but in program order this time.
      if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
        continue;

      // Remove this instruction from any NearUsers set it may be in.
      for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
           ChainIdx < NChains; ++ChainIdx) {
        ChainUsersVec[ChainIdx].NearUsers.erase(&I);
      }
      // Search for operands that can be chained.
      SmallPtrSet<Instruction *, 4> UniqueOperands;
      User::op_iterator IVOpEnd = I.op_end();
      User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
      while (IVOpIter != IVOpEnd) {
        Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
        if (UniqueOperands.insert(IVOpInst).second)
          ChainInstruction(&I, IVOpInst, ChainUsersVec);
        IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
      }
    } // Continue walking down the instructions.
  } // Continue walking down the domtree.
  // Visit phi backedges to determine if the chain can generate the IV postinc.
  for (PHINode &PN : L->getHeader()->phis()) {
    if (!SE.isSCEVable(PN.getType()))
      continue;

    Instruction *IncV =
        dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
    if (IncV)
      ChainInstruction(&PN, IncV, ChainUsersVec);
  }
  // Remove any unprofitable chains.
  unsigned ChainIdx = 0;
  for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
       UsersIdx < NChains; ++UsersIdx) {
    if (!isProfitableChain(IVChainVec[UsersIdx],
                           ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
      continue;
    // Preserve the chain at UsesIdx.
    if (ChainIdx != UsersIdx)
      IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
    FinalizeChain(IVChainVec[ChainIdx]);
    ++ChainIdx;
  }
  IVChainVec.resize(ChainIdx);
}
void LSRInstance::FinalizeChain(IVChain &Chain) {
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
  LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");

  for (const IVInc &Inc : Chain) {
    LLVM_DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
    auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
    assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
    IVIncSet.insert(UseI);
  }
}
/// Return true if the IVInc can be folded into an addressing mode.
static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
                             Value *Operand, const TargetTransformInfo &TTI) {
  const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
  if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
    return false;

  if (IncConst->getAPInt().getSignificantBits() > 64)
    return false;

  MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
  int64_t IncOffset = IncConst->getValue()->getSExtValue();
  if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
                        IncOffset, /*HasBaseReg=*/false))
    return false;

  return true;
}
/// Generate an add or subtract for each IVInc in a chain to materialize the IV
/// user's operand from the previous IV user's operand.
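// A minimal sketch of the rewrite (hypothetical values, simplified): given a
// chain whose second user's address differs from the head's by the
// loop-invariant %stride,
//   %a = load [%iv]        ; chain head, IVSrc = %iv
//   %b = load [%iv.next]   ; where getSCEV(%iv.next) == getSCEV(%iv) + %stride
// the second user is rewritten to compute its operand from the head's value,
//   %lsr.chain = add %iv, %stride
//   %b = load [%lsr.chain]
// so only one IV-derived register needs to stay live across the two users.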
void LSRInstance::GenerateIVChain(const IVChain &Chain,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  // Find the new IVOperand for the head of the chain. It may have been replaced
  // by LSR.
  const IVInc &Head = Chain.Incs[0];
  User::op_iterator IVOpEnd = Head.UserInst->op_end();
  // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
  User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
                                             IVOpEnd, L, SE);
  Value *IVSrc = nullptr;
  while (IVOpIter != IVOpEnd) {
    IVSrc = getWideOperand(*IVOpIter);

    // If this operand computes the expression that the chain needs, we may use
    // it. (Check this after setting IVSrc which is used below.)
    //
    // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
    // narrow for the chain, so we can no longer use it. We do allow using a
    // wider phi, assuming the LSR checked for free truncation. In that case we
    // should already have a truncate on this operand such that
    // getSCEV(IVSrc) == IncExpr.
    if (SE.getSCEV(*IVOpIter) == Head.IncExpr
        || SE.getSCEV(IVSrc) == Head.IncExpr) {
      break;
    }
    IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
  }
  if (IVOpIter == IVOpEnd) {
    // Gracefully give up on this chain.
    LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
    return;
  }
  assert(IVSrc && "Failed to find IV chain source");

  LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
  Type *IVTy = IVSrc->getType();
  Type *IntTy = SE.getEffectiveSCEVType(IVTy);
  const SCEV *LeftOverExpr = nullptr;
  for (const IVInc &Inc : Chain) {
    Instruction *InsertPt = Inc.UserInst;
    if (isa<PHINode>(InsertPt))
      InsertPt = L->getLoopLatch()->getTerminator();

    // IVOper will replace the current IV User's operand. IVSrc is the IV
    // value currently held in a register.
    Value *IVOper = IVSrc;
    if (!Inc.IncExpr->isZero()) {
      // IncExpr was the result of subtraction of two narrow values, so must
      // be signed.
      const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
      LeftOverExpr = LeftOverExpr ?
        SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
    }
    if (LeftOverExpr && !LeftOverExpr->isZero()) {
      // Expand the IV increment.
      Rewriter.clearPostInc();
      Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
      const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
                                             SE.getUnknown(IncV));
      IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);

      // If an IV increment can't be folded, use it as the next IV value.
      if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
        assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
        IVSrc = IVOper;
        LeftOverExpr = nullptr;
      }
    }
    Type *OperTy = Inc.IVOperand->getType();
    if (IVTy != OperTy) {
      assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
             "cannot extend a chained IV");
      IRBuilder<> Builder(InsertPt);
      IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
    }
    Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
    if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand))
      DeadInsts.emplace_back(OperandIsInstr);
  }
  // If LSR created a new, wider phi, we may also replace its postinc. We only
  // do this if we also found a wide value for the head of the chain.
  if (isa<PHINode>(Chain.tailUserInst())) {
    for (PHINode &Phi : L->getHeader()->phis()) {
      if (Phi.getType() != IVSrc->getType())
        continue;
      Instruction *PostIncV = dyn_cast<Instruction>(
          Phi.getIncomingValueForBlock(L->getLoopLatch()));
      if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
        continue;
      Value *IVOper = IVSrc;
      Type *PostIncTy = PostIncV->getType();
      if (IVTy != PostIncTy) {
        assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
        IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
        Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
        IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
      }
      Phi.replaceUsesOfWith(PostIncV, IVOper);
      DeadInsts.emplace_back(PostIncV);
    }
  }
}
void LSRInstance::CollectFixupsAndInitialFormulae() {
  BranchInst *ExitBranch = nullptr;
  bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI);

  // For calculating baseline cost
  SmallPtrSet<const SCEV *, 16> Regs;
  DenseSet<const SCEV *> VisitedRegs;
  DenseSet<size_t> VisitedLSRUse;

  for (const IVStrideUse &U : IU) {
    Instruction *UserInst = U.getUser();
    // Skip IV users that are part of profitable IV Chains.
    User::op_iterator UseI =
        find(UserInst->operands(), U.getOperandValToReplace());
    assert(UseI != UserInst->op_end() && "cannot find IV operand");
    if (IVIncSet.count(UseI)) {
      LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
      continue;
    }

    LSRUse::KindType Kind = LSRUse::Basic;
    MemAccessTy AccessTy;
    if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace());
    }

    const SCEV *S = IU.getExpr(U);
    PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops();

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
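    //
    // For example (illustrative): with i = {0,+,1}<%L> and a loop-invariant
    // N, the exit test "icmp eq %i, %N" is handled as an ICmpZero use whose
    // expression is (N - i) = {N,+,-1}<%L>, compared against zero, so the
    // register requirements of N and i are costed together.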
    if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) {
      // If CI can be saved in some target, like replaced inside hardware loop
      // in PowerPC, no need to generate initial formulae for it.
      if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition()))
        continue;
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == U.getOperandValToReplace()) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
          Changed = true;
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (SE.isLoopInvariant(N, L) && Rewriter.isSafeToExpand(N) &&
            (!NV->getType()->isPointerTy() ||
             SE.getPointerBase(N) == SE.getPointerBase(S))) {
          // S is normalized, so normalize N before folding it into S
          // to keep the result normalized.
          N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
          if (!N)
            continue;
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        } else if (L->isLoopInvariant(NV) &&
                   (!isa<Instruction>(NV) ||
                    DT.dominates(cast<Instruction>(NV), L->getHeader())) &&
                   !NV->getType()->isPointerTy()) {
          // If we can't generally expand the expression (e.g. it contains
          // a divide), but it is already at a loop invariant point before the
          // loop, wrap it in an unknown (to prevent the expander from trying
          // to re-expand in a potentially unsafe way.) The restriction to
          // integer types is required because the unknown hides the base, and
          // SCEV can't compute the difference of two unknown pointers.
          N = SE.getUnknown(NV);
          N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
          if (!N)
            continue;
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
          assert(!isa<SCEVCouldNotCompute>(S));
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }
    }

    // Get or create an LSRUse.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    size_t LUIdx = P.first;
    int64_t Offset = P.second;
    LSRUse &LU = Uses[LUIdx];

    // Record the fixup.
    LSRFixup &LF = LU.getNewFixup();
    LF.UserInst = UserInst;
    LF.OperandValToReplace = U.getOperandValToReplace();
    LF.PostIncLoops = TmpPostIncLoops;
    LF.Offset = Offset;
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);

    // Create SCEV as Formula for calculating baseline cost
    if (!VisitedLSRUse.count(LUIdx) && !LF.isUseFullyOutsideLoop(L)) {
      Formula F;
      F.initialMatch(S, L, SE);
      BaselineCost.RateFormula(F, Regs, VisitedRegs, LU);
      VisitedLSRUse.insert(LUIdx);
    }

    if (!LU.WidestFixupType ||
        SE.getTypeSizeInBits(LU.WidestFixupType) <
        SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LUIdx);
      CountRegisters(LU.Formulae.back(), LUIdx);
    }
  }

  LLVM_DEBUG(print_fixups(dbgs()));
}
/// Insert a formula for the given expression into the given use, separating out
/// loop-variant portions from loop-invariant and loop-computable portions.
void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU,
                                       size_t LUIdx) {
  // Mark uses whose expressions cannot be expanded.
  if (!Rewriter.isSafeToExpand(S))
    LU.RigidFormula = true;

  Formula F;
  F.initialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}
/// Insert a simple single-register formula for the given expression into the
/// given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}
/// Note which registers are used by the given formula, updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.countRegister(F.ScaledReg, LUIdx);
  for (const SCEV *BaseReg : F.BaseRegs)
    RegUses.countRegister(BaseReg, LUIdx);
}
/// If the given formula has not yet been inserted, add it to the list, and
/// return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  // Do not insert formula that we will not be able to expand.
  assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
         "Formula is illegal");

  if (!LU.InsertFormula(F, *L))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}
/// Check for other uses of loop-invariant values which we're tracking. These
/// other uses will pin these values in registers, making them less profitable
/// for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 32> Visited;

  // Don't collect outside uses if we are favoring postinc - the instructions in
  // the loop are more important than the ones outside of it.
  if (AMK == TTI::AMK_PostIndexed)
    return;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    // Don't process the same SCEV twice
    if (!Visited.insert(S).second)
      continue;

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      append_range(Worklist, N->operands());
    else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
      const Value *V = US->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
        // Look for instructions defined outside the loop.
        if (L->contains(Inst)) continue;
      } else if (isa<Constant>(V))
        // Constants can be re-materialized.
        continue;
      for (const Use &U : V->uses()) {
        const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Don't bother if the instruction is an EHPad.
        if (UserInst->isEHPad())
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Don't bother if the instruction is in a BB which ends in an EHPad.
        if (UseBB->getTerminator()->isEHPad())
          continue;

        // Ignore cases in which the currently-examined value could come from
        // a basic block terminated with an EHPad. This checks all incoming
        // blocks of the phi node since it is possible that the same incoming
        // value comes from multiple basic blocks, only some of which may end
        // in an EHPad. If any of them do, a subsequent rewrite attempt by this
        // pass would try to insert instructions into an EHPad, hitting an
        // assertion.
        if (isa<PHINode>(UserInst)) {
          const auto *PhiNode = cast<PHINode>(UserInst);
          bool HasIncompatibleEHPTerminatedBlock = false;
          llvm::Value *ExpectedValue = U;
          for (unsigned int I = 0; I < PhiNode->getNumIncomingValues(); I++) {
            if (PhiNode->getIncomingValue(I) == ExpectedValue) {
              if (PhiNode->getIncomingBlock(I)->getTerminator()->isEHPad()) {
                HasIncompatibleEHPTerminatedBlock = true;
                break;
              }
            }
          }
          if (HasIncompatibleEHPTerminatedBlock) {
            continue;
          }
        }

        // Don't bother rewriting PHIs in catchswitch blocks.
        if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType())) {
          const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
          // If the user is a no-op, look through to its uses.
          if (!isa<SCEVUnknown>(UserS))
            continue;
          if (UserS == US) {
            Worklist.push_back(
                SE.getUnknown(const_cast<Instruction *>(UserInst)));
            continue;
          }
        }
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !U.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
            continue;
        }

        std::pair<size_t, int64_t> P = getUse(
            S, LSRUse::Basic, MemAccessTy());
        size_t LUIdx = P.first;
        int64_t Offset = P.second;
        LSRUse &LU = Uses[LUIdx];
        LSRFixup &LF = LU.getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = U;
        LF.Offset = Offset;
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        if (!LU.WidestFixupType ||
            SE.getTypeSizeInBits(LU.WidestFixupType) <
            SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(US, LU, LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
      }
    }
  }
}
/// Split S into subexpressions which can be pulled out into separate
/// registers. If C is non-null, multiply each subexpression by C.
///
/// Return remainder expression after factoring the subexpressions captured by
/// Ops. If Ops is complete, return NULL.
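///
/// For example (illustrative): with C == nullptr and S = {(A + B),+,1}<%L>,
/// A and B are pushed onto Ops and the returned remainder is {0,+,1}<%L>,
/// so the loop-invariant pieces can be given their own registers.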
static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                                   SmallVectorImpl<const SCEV *> &Ops,
                                   const Loop *L,
                                   ScalarEvolution &SE,
                                   unsigned Depth = 0) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3)
    return S;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (const SCEV *S : Add->operands()) {
      const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
      if (Remainder)
        Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
    }
    return nullptr;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (AR->getStart()->isZero() || !AR->isAffine())
      return S;

    const SCEV *Remainder = CollectSubexprs(AR->getStart(),
                                            C, Ops, L, SE, Depth+1);
    // Split the non-zero AddRec unless it is part of a nested recurrence that
    // does not pertain to this loop.
    if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
      Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
      Remainder = nullptr;
    }
    if (Remainder != AR->getStart()) {
      if (!Remainder)
        Remainder = SE.getConstant(AR->getType(), 0);
      return SE.getAddRecExpr(Remainder,
                              AR->getStepRecurrence(SE),
                              AR->getLoop(),
                              //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                              SCEV::FlagAnyWrap);
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() != 2)
      return S;
    if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
      const SCEV *Remainder =
          CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
      if (Remainder)
        Ops.push_back(SE.getMulExpr(C, Remainder));
      return nullptr;
    }
  }
  return S;
}
/// Return true if the SCEV represents a value that may end up as a
/// post-increment operation.
static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
                              LSRUse &LU, const SCEV *S, const Loop *L,
                              ScalarEvolution &SE) {
  if (LU.Kind != LSRUse::Address ||
      !LU.AccessTy.getType()->isIntOrIntVectorTy())
    return false;
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  if (!AR)
    return false;
  const SCEV *LoopStep = AR->getStepRecurrence(SE);
  if (!isa<SCEVConstant>(LoopStep))
    return false;
  // Check if a post-indexed load/store can be used.
  if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
      TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
    const SCEV *LoopStart = AR->getStart();
    if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
      return true;
  }
  return false;
}
/// Helper function for LSRInstance::GenerateReassociations.
void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                             const Formula &Base,
                                             unsigned Depth, size_t Idx,
                                             bool IsScaledReg) {
  const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  // Don't generate reassociations for the base register of a value that
  // may generate a post-increment operator. The reason is that the
  // reassociations cause extra base+register formula to be created,
  // and possibly chosen, but the post-increment is more efficient.
  if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
    return;
  SmallVector<const SCEV *, 8> AddOps;
  const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
  if (Remainder)
    AddOps.push_back(Remainder);

  if (AddOps.size() == 1)
    return;

  for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
       JE = AddOps.end(); J != JE; ++J) {
    // Loop-variant "unknown" values are uninteresting; we won't be able to
    // do anything meaningful with them.
    if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
      continue;

    // Don't pull a constant into a register if the constant could be folded
    // into an immediate field.
    if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
                         LU.AccessTy, *J, Base.getNumRegs() > 1))
      continue;

    // Collect all operands except *J.
    SmallVector<const SCEV *, 8> InnerAddOps(
        ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
    InnerAddOps.append(std::next(J),
                       ((const SmallVector<const SCEV *, 8> &)AddOps).end());

    // Don't leave just a constant behind in a register if the constant could
    // be folded into an immediate field.
    if (InnerAddOps.size() == 1 &&
        isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
                         LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
      continue;

    const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
    if (InnerSum->isZero())
      continue;
    Formula F = Base;

    // Add the remaining pieces of the add back into the new formula.
    const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
    if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                InnerSumSC->getValue()->getZExtValue())) {
      F.UnfoldedOffset =
          (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
      if (IsScaledReg)
        F.ScaledReg = nullptr;
      else
        F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
    } else if (IsScaledReg)
      F.ScaledReg = InnerSum;
    else
      F.BaseRegs[Idx] = InnerSum;

    // Add J as its own register, or an unfolded immediate.
    const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
    if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
        TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                SC->getValue()->getZExtValue()))
      F.UnfoldedOffset =
          (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
    else
      F.BaseRegs.push_back(*J);
    // We may have changed the number of register in base regs, adjust the
    // formula accordingly.
    F.canonicalize(*L);

    if (InsertFormula(LU, LUIdx, F))
      // If that formula hadn't been seen before, recurse to find more like
      // it.
      // Add check on Log16(AddOps.size()) - same as Log2_32(AddOps.size()) >> 2)
      // Because just Depth is not enough to bound compile time.
      // This means that every time AddOps.size() is greater 16^x we will add
      // x to Depth.
      GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
                             Depth + 1 + (Log2_32(AddOps.size()) >> 2));
  }
}
/// Split out subexpressions from adds and the bases of addrecs.
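///
/// For example (illustrative), the one-register formula
///   reg((A + {0,+,8}<%L>))
/// can be reassociated into the two-register formula
///   reg(A) + reg({0,+,8}<%L>)
/// so the loop-invariant A may be shared with other uses.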
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base, unsigned Depth) {
  assert(Base.isCanonical(*L) && "Input must be in the canonical form");
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3)
    return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);

  if (Base.Scale == 1)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
                               /* Idx */ -1, /* IsScaledReg */ true);
}
/// Generate a formula consisting of all of the loop-dominating registers added
/// into a single register.
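///
/// For example (illustrative), if A and B are loop-invariant, the formula
///   reg(A) + reg(B) + reg({0,+,1}<%L>)
/// gains the variant
///   reg((A + B)) + reg({0,+,1}<%L>)
/// trading an extra add for one fewer register.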
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() + (Base.Scale == 1) +
      (Base.UnfoldedOffset != 0) <= 1)
    return;

  // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
  // processing the formula.
  Base.unscale();
  SmallVector<const SCEV *, 4> Ops;
  Formula NewBase = Base;
  NewBase.BaseRegs.clear();
  Type *CombinedIntegerType = nullptr;
  for (const SCEV *BaseReg : Base.BaseRegs) {
    if (SE.properlyDominates(BaseReg, L->getHeader()) &&
        !SE.hasComputableLoopEvolution(BaseReg, L)) {
      if (!CombinedIntegerType)
        CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
      Ops.push_back(BaseReg);
    } else
      NewBase.BaseRegs.push_back(BaseReg);
  }

  // If no register is relevant, we're done.
  if (Ops.size() == 0)
    return;

  // Utility function for generating the required variants of the combined
  // registers.
  auto GenerateFormula = [&](const SCEV *Sum) {
    Formula F = NewBase;

    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (Sum->isZero())
      return;

    F.BaseRegs.push_back(Sum);
    F.canonicalize(*L);
    (void)InsertFormula(LU, LUIdx, F);
  };

  // If we collected at least two registers, generate a formula combining them.
  if (Ops.size() > 1) {
    SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
    GenerateFormula(SE.getAddExpr(OpsCopy));
  }

  // If we have an unfolded offset, generate a formula combining it with the
  // registers collected.
  if (NewBase.UnfoldedOffset) {
    assert(CombinedIntegerType && "Missing a type for the unfolded offset");
    Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset,
                                 true));
    NewBase.UnfoldedOffset = 0;
    GenerateFormula(SE.getAddExpr(Ops));
  }
}
/// Helper function for LSRInstance::GenerateSymbolicOffsets.
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                              const Formula &Base, size_t Idx,
                                              bool IsScaledReg) {
  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  GlobalValue *GV = ExtractSymbol(G, SE);
  if (G->isZero() || !GV)
    return;
  Formula F = Base;
  F.BaseGV = GV;
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    return;
  if (IsScaledReg)
    F.ScaledReg = G;
  else
    F.BaseRegs[Idx] = G;
  (void)InsertFormula(LU, LUIdx, F);
}
/// Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
  if (Base.Scale == 1)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
                                /* IsScaledReg */ true);
}
/// Helper function for LSRInstance::GenerateConstantOffsets.
void LSRInstance::GenerateConstantOffsetsImpl(
    LSRUse &LU, unsigned LUIdx, const Formula &Base,
    const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {

  auto GenerateOffset = [&](const SCEV *G, int64_t Offset) {
    Formula F = Base;
    F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;

    if (isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) {
      // Add the offset to the base register.
      const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
      // If it cancelled out, drop the base register, otherwise update it.
      if (NewG->isZero()) {
        if (IsScaledReg) {
          F.Scale = 0;
          F.ScaledReg = nullptr;
        } else
          F.deleteBaseReg(F.BaseRegs[Idx]);
        F.canonicalize(*L);
      } else if (IsScaledReg)
        F.ScaledReg = NewG;
      else
        F.BaseRegs[Idx] = NewG;

      (void)InsertFormula(LU, LUIdx, F);
    }
  };

  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];

  // With constant offsets and constant steps, we can generate pre-inc
  // accesses by having the offset equal the step. So, for access #0 with a
  // step of 8, we generate a G - 8 base which would require the first access
  // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
  // for itself and hopefully becomes the base for other accesses. This means
  // that a single pre-indexed access can be generated to become the new base
  // pointer for each iteration of the loop, resulting in no extra add/sub
  // instructions for pointer updating.
  if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) {
    if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
      if (auto *StepRec =
              dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
        const APInt &StepInt = StepRec->getAPInt();
        int64_t Step = StepInt.isNegative() ?
          StepInt.getSExtValue() : StepInt.getZExtValue();

        for (int64_t Offset : Worklist) {
          Offset -= Step;
          GenerateOffset(G, Offset);
        }
      }
    }
  }
  for (int64_t Offset : Worklist)
    GenerateOffset(G, Offset);

  int64_t Imm = ExtractImmediate(G, SE);
  if (G->isZero() || Imm == 0)
    return;
  Formula F = Base;
  F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    return;
  if (IsScaledReg)
    F.ScaledReg = G;
  else {
    F.BaseRegs[Idx] = G;
    // We may generate non canonical Formula if G is a recurrent expr reg
    // related with current loop while F.ScaledReg is not.
    F.canonicalize(*L);
  }
  (void)InsertFormula(LU, LUIdx, F);
}
/// Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything inbetween.
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
  if (Base.Scale == 1)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
                                /* IsScaledReg */ true);
}
/// For ICmpZero, check to see if we can scale up the comparison. For example,
/// x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  // Check if transformation is valid. It is illegal to multiply pointer.
  if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
    return;
  for (const SCEV *BaseReg : Base.BaseRegs)
    if (BaseReg->getType()->isPointerTy())
      return;
  assert(!Base.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (int64_t Factor : Factors) {
    // Check that Factor can be represented by IntTy
    if (!ConstantInt::isValueValidForType(IntTy, Factor))
      continue;
    // Check that the multiplication doesn't overflow.
    if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
      continue;
    int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
    assert(Factor != 0 && "Zero factor not expected!");
    if (NewBaseOffset / Factor != Base.BaseOffset)
      continue;
    // If the offset will be truncated at this use, check that it is in bounds.
    if (!IntTy->isPointerTy() &&
        !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;
    // If the offset will be truncated at this use, check that it is in bounds.
    if (!IntTy->isPointerTy() &&
        !ConstantInt::isValueValidForType(IntTy, Offset))
      continue;

    Formula F = Base;
    F.BaseOffset = NewBaseOffset;

    // Check that this scale is legal.
    if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // Check that multiplying with the unfolded offset doesn't overflow.
    if (F.UnfoldedOffset != 0) {
      if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
          Factor == -1)
        continue;
      F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
      if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
        continue;
      // If the offset will be truncated, check that it is in bounds.
      if (!IntTy->isPointerTy() &&
          !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}
/// Generate stride factor reuse formulae by making use of scaled-offset address
/// modes, for example.
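///
/// For example (illustrative), with an interesting stride factor of 4, the
/// formula reg({0,+,4}<%L>) can be re-expressed as 4*reg({0,+,1}<%L>), letting
/// a scaled-index addressing mode absorb the multiplication.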
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  // Try to unscale the formula to generate a better scale.
  if (Base.Scale != 0 && !Base.unscale())
    return;

  assert(Base.Scale == 0 && "unscale did not do its job!");

  // Check each interesting stride.
  for (int64_t Factor : Factors) {
    Base.Scale = Factor;
    Base.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                    Base)) {
      // As a special-case, handle special out-of-loop Basic users specially.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
                     LU.AccessTy, Base) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
      continue;
    // For each addrec base reg, if its loop is current loop, apply the scale.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
      if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true))
          if (!Quotient->isZero()) {
            // TODO: This could be optimized to avoid all the copying.
            Formula F = Base;
            F.ScaledReg = Quotient;
            F.deleteBaseReg(F.BaseRegs[i]);
            // The canonical representation of 1*reg is reg, which is already in
            // Base. In that case, do not try to insert the formula, it will be
            // rejected anyway.
            if (F.Scale == 1 && (F.BaseRegs.empty() ||
                                 (AR->getLoop() != L &&
                                  LU.AllFixupsOutsideLoop)))
              continue;
            // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate
            // non canonical Formula with ScaledReg's loop not being L.
            if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
              F.canonicalize(*L);
            (void)InsertFormula(LU, LUIdx, F);
          }
      }
    }
  }
}
/// Extend/Truncate \p Expr to \p ToTy considering post-inc uses in \p Loops.
/// For all PostIncLoopSets in \p Loops, first de-normalize \p Expr, then
/// perform the extension/truncate and normalize again, as the normalized form
/// can result in folds that are not valid in the post-inc use contexts. The
/// expressions for all PostIncLoopSets must match, otherwise return nullptr.
static const SCEV *
getAnyExtendConsideringPostIncUses(ArrayRef<PostIncLoopSet> Loops,
                                   const SCEV *Expr, Type *ToTy,
                                   ScalarEvolution &SE) {
  const SCEV *Result = nullptr;
  for (auto &L : Loops) {
    auto *DenormExpr = denormalizeForPostIncUse(Expr, L, SE);
    const SCEV *NewDenormExpr = SE.getAnyExtendExpr(DenormExpr, ToTy);
    const SCEV *New = normalizeForPostIncUse(NewDenormExpr, L, SE);
    if (!New || (Result && New != Result))
      return nullptr;
    Result = New;
  }

  assert(Result && "failed to create expression");
  return Result;
}
/// Generate reuse formulae from different IV types.
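///
/// For example (illustrative), if truncation from i64 to i32 is free on the
/// target, an i32 formula reg({0,+,1}<%L>) may be re-expressed over an i64
/// register so that it can be shared with wider uses.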
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Don't bother truncating symbolic values.
  if (Base.BaseGV) return;

  // Determine the integer type for the base formula.
  Type *DstTy = Base.getType();
  if (!DstTy) return;
  if (DstTy->isPointerTy())
    return;

  // It is invalid to extend a pointer type so exit early if ScaledReg or
  // any of the BaseRegs are pointers.
  if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
    return;
  if (any_of(Base.BaseRegs,
             [](const SCEV *S) { return S->getType()->isPointerTy(); }))
    return;

  SmallVector<PostIncLoopSet> Loops;
  for (auto &LF : LU.Fixups)
    Loops.push_back(LF.PostIncLoops);

  for (Type *SrcTy : Types) {
    if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      // Sometimes SCEV is able to prove zero during ext transform. It may
      // happen if SCEV did not do all possible transforms while creating the
      // initial node (maybe due to depth limitations), but it can do them while
      // taking ext.
      if (F.ScaledReg) {
        const SCEV *NewScaledReg =
            getAnyExtendConsideringPostIncUses(Loops, F.ScaledReg, SrcTy, SE);
        if (!NewScaledReg || NewScaledReg->isZero())
          continue;
        F.ScaledReg = NewScaledReg;
      }
      bool HasZeroBaseReg = false;
      for (const SCEV *&BaseReg : F.BaseRegs) {
        const SCEV *NewBaseReg =
            getAnyExtendConsideringPostIncUses(Loops, BaseReg, SrcTy, SE);
        if (!NewBaseReg || NewBaseReg->isZero()) {
          HasZeroBaseReg = true;
          break;
        }
        BaseReg = NewBaseReg;
      }
      if (HasZeroBaseReg)
        continue;

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      F.canonicalize(*L);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {
/// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
/// modifications so that the search phase doesn't have to worry about the data
/// structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
      : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << " , add offset " << Imm;
}

LLVM_DUMP_METHOD void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}
#endif
/// Look for registers which are a constant distance apart and try to form reuse
/// opportunities between them.
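///
/// For example (illustrative), if one use needs reg({0,+,1}<%L>) and another
/// needs reg({4,+,1}<%L>), the second can be rewritten as reg({0,+,1}<%L>)
/// plus a constant offset of 4, so both uses share a single register.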
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  using ImmMapTy = std::map<int64_t, const SCEV *>;

  DenseMap<const SCEV *, ImmMapTy> Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (const SCEV *Use : RegUses) {
    const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
    int64_t Imm = ExtractImmediate(Reg, SE);
    auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, Use));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (const SCEV *Reg : Sequence) {
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
               for (const auto &Entry : Imms)
                 dbgs() << ' ' << Entry.first;
               dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
                          << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg a few selected
      // new regs.
      int64_t First = Imms.begin()->first;
      int64_t Last = std::prev(Imms.end())->first;
      // Compute (First + Last) / 2 without overflow using the fact that
      // First + Last = 2 * (First & Last) + (First ^ Last).
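      // E.g. (illustrative): First = -3, Last = -4 gives
      // (-3 & -4) + ((-3 ^ -4) >> 1) = -4 + 0 = -4 (rounded towards -inf);
      // the adjustment below then yields -4 + 1 = -3, i.e. rounded towards 0.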
      int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
      // If the result is negative and First is odd and Last even (or vice versa),
      // we rounded towards -inf. Add 1 in that case, to round towards 0.
      Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63));
      ImmMapTy::const_iterator OtherImms[] = {
          Imms.begin(), std::prev(Imms.end()),
          Imms.lower_bound(Avg)};
      for (const auto &M : OtherImms) {
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (unsigned LUIdx : UsedByIndices.set_bits())
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (const WorkItem &WI : WorkItems) {
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // FIXME: The code for the scaled and unscaled registers looks
      // very similar but slightly different. Investigate if they
      // could be merged. That way, we would not have to unscale the
      // Formula.
      F.unscale();
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                ConstantInt::get(IntTy, -(uint64_t)Offset))))
          continue;
        Formula NewF = F;
        NewF.BaseOffset = Offset;
        if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        NewF))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
              (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
                  .ule(std::abs(NewF.BaseOffset)))
            continue;

        // OK, looks good.
        NewF.canonicalize(*this->L);
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
          if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, NewF)) {
            if (AMK == TTI::AMK_PostIndexed &&
                mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
              continue;
            if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
              continue;
            NewF = F;
            NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
          } else
            NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (const SCEV *NewReg : NewF.BaseRegs)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
              if ((C->getAPInt() + NewF.BaseOffset)
                      .abs()
                      .slt(std::abs(NewF.BaseOffset)) &&
                  (C->getAPInt() + NewF.BaseOffset).countr_zero() >=
                      (unsigned)llvm::countr_zero<uint64_t>(NewF.BaseOffset))
                goto skip_formula;

          // Ok, looks good.
          NewF.canonicalize(*this->L);
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}
/// Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  LLVM_DEBUG(dbgs() << "\n"
                       "After generating reuse formulae:\n";
             print_uses(dbgs()));
}
/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  SmallPtrSet<const SCEV *, 16> LoserRegs;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  using BestFormulaeTy =
      DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;

  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
               dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      // Some formulas are instant losers. For example, they may depend on
      // nonexistent AddRecs from other loops. These need to be filtered
      // immediately, otherwise heuristics could choose them over others leading
      // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
      // avoids the need to recompute this information across formulae using the
      // same bad AddRec. Passing LoserRegs is also essential unless we remove
      // the corresponding bad register from the Regs set.
      Cost CostF(L, SE, TTI, AMK);
      Regs.clear();
      CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are generated
        // by uses within other loops that have some non-trivial address mode or
        // use the postinc form of the IV. LSR needs to provide these formulae
        // as the basis of rediscovering the desired formula that uses an AddRec
        // corresponding to the existing phi. Once all formulae have been
        // generated, these initial losers may be pruned.
        LLVM_DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
                   dbgs() << "\n");
      }
      else {
        SmallVector<const SCEV *, 4> Key;
        for (const SCEV *Reg : F.BaseRegs) {
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
        if (F.ScaledReg &&
            RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
          Key.push_back(F.ScaledReg);
        // Unstable sort by host order ok, because this is only used for
        // uniquifying.
        llvm::sort(Key);

        std::pair<BestFormulaeTy::const_iterator, bool> P =
            BestFormulae.insert(std::make_pair(Key, FIdx));
        if (P.second)
          continue;

        Formula &Best = LU.Formulae[P.first->second];

        Cost CostBest(L, SE, TTI, AMK);
        Regs.clear();
        CostBest.RateFormula(Best, Regs, VisitedRegs, LU);
        if (CostF.isLess(CostBest))
          std::swap(F, Best);
        LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                   dbgs() << "\n"
                             "    in favor of formula ";
                   Best.print(dbgs()); dbgs() << '\n');
      }
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  LLVM_DEBUG(if (ChangedFormulae) {
    dbgs() << "\n"
              "After filtering out undesirable candidates:\n";
    print_uses(dbgs());
  });
}
/// Estimate the worst-case number of solutions the solver might have to
/// consider. It almost never considers this many solutions because it prunes
/// the search space, but the pruning isn't always sufficient.
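///
/// For example (illustrative), three uses holding 4, 5 and 6 formulae give a
/// worst-case estimate of 4 * 5 * 6 = 120 candidate solutions; the walk stops
/// early once the estimate reaches ComplexityLimit.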
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (const LSRUse &LU : Uses) {
    size_t FSize = LU.Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}
/// When one formula uses a superset of the registers of another formula, it
/// won't help reduce register pressure (though it may not necessarily hurt
/// register pressure); remove it to simplify the system.
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                         "which use a superset of registers used by other "
                         "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            //FIXME: Formulas should store bitwidth to do wrapping properly.
            NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                         dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.BaseGV) {
                Formula NewF = F;
                NewF.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                             dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}
/// When there are many registers for expressions like A, A+1, A+2, etc.,
/// allocate a single register for them.
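///
/// For example (illustrative), after a loop is unrolled by four, the addresses
/// A, A+1, A+2 and A+3 can all be formed from reg(A) plus constant offsets
/// 0..3, so the four uses collapse onto a single register.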
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(
      dbgs() << "The search space is too complex.\n"
                "Narrowing the search space by assuming that uses separated "
                "by a constant offset will use the same registers.\n");

  // This is especially useful for unrolled loops.

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (const Formula &F : LU.Formulae) {
      if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
        continue;

      LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
      if (!LUThatHas)
        continue;

      if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
                              LU.Kind, LU.AccessTy))
        continue;

      LLVM_DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs()); dbgs() << '\n');

      LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

      // Transfer the fixups of LU to LUThatHas.
      for (LSRFixup &Fixup : LU.Fixups) {
        Fixup.Offset += F.BaseOffset;
        LUThatHas->pushFixup(Fixup);
        LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
      }

      // Delete formulae from the new use which are no longer legal.
      bool Any = false;
      for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
        Formula &F = LUThatHas->Formulae[i];
        if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
                        LUThatHas->Kind, LUThatHas->AccessTy, F)) {
          LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LUThatHas->DeleteFormula(F);
          --i;
          --e;
          Any = true;
        }
      }

      if (Any)
        LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

      // Delete the old use.
      DeleteUse(LU, LUIdx);
      --LUIdx;
      --NumUses;
      break;
    }
  }

  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
/// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                         "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}
/// If a LSRUse has multiple formulae with the same ScaledReg and Scale, pick
/// the best one and delete the others.
/// This narrowing heuristic is to keep as many formulae with different
/// Scale and ScaledReg pair as possible while narrowing the search space.
/// The benefit is that it is more likely to find out a better solution
/// from a formulae set with more Scale and ScaledReg variations than
/// a formulae set with the same Scale and ScaledReg. The picking winner
/// reg heuristic will often keep the formulae with the same Scale and
/// ScaledReg and filter others, and we want to avoid that if possible.
void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(
      dbgs() << "The search space is too complex.\n"
                "Narrowing the search space by choosing the best Formula "
                "from the Formulae with the same Scale and ScaledReg.\n");

  // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
  using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;

  BestFormulaeTy BestFormulae;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
               dbgs() << '\n');

    // Return true if Formula FA is better than Formula FB.
    auto IsBetterThan = [&](Formula &FA, Formula &FB) {
      // First we will try to choose the Formula with fewer new registers.
      // For a register used by current Formula, the more the register is
      // shared among LSRUses, the less we increase the register number
      // counter of the formula.
      size_t FARegNum = 0;
      for (const SCEV *Reg : FA.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FARegNum += (NumUses - UsedByIndices.count() + 1);
      }
      size_t FBRegNum = 0;
      for (const SCEV *Reg : FB.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FBRegNum += (NumUses - UsedByIndices.count() + 1);
      }
      if (FARegNum != FBRegNum)
        return FARegNum < FBRegNum;

      // If the new register numbers are the same, choose the Formula with
      // less Cost.
      Cost CostFA(L, SE, TTI, AMK);
      Cost CostFB(L, SE, TTI, AMK);
      Regs.clear();
      CostFA.RateFormula(FA, Regs, VisitedRegs, LU);
      Regs.clear();
      CostFB.RateFormula(FB, Regs, VisitedRegs, LU);
      return CostFA.isLess(CostFB);
    };

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (!F.ScaledReg)
        continue;
      auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
      if (P.second)
        continue;

      Formula &Best = LU.Formulae[P.first->second];
      if (IsBetterThan(F, Best))
        std::swap(F, Best);
      LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                 dbgs() << "\n"
                           "    in favor of formula ";
                 Best.print(dbgs()); dbgs() << '\n');
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  LLVM_DEBUG(if (ChangedFormulae) {
    dbgs() << "\n"
              "After filtering out undesirable candidates:\n";
    print_uses(dbgs());
  });
}
/// If we are over the complexity limit, filter out any post-inc preferring
/// variables to only post-inc values.
void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
  if (AMK != TTI::AMK_PostIndexed)
    return;
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
                       "Narrowing the search space by choosing the lowest "
                       "register Formula for PostInc Uses.\n");

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];

    if (LU.Kind != LSRUse::Address)
      continue;
    if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) &&
        !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType()))
      continue;

    size_t MinRegs = std::numeric_limits<size_t>::max();
    for (const Formula &F : LU.Formulae)
      MinRegs = std::min(F.getNumRegs(), MinRegs);

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (F.getNumRegs() > MinRegs) {
        LLVM_DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
                   dbgs() << "\n");
        LU.DeleteFormula(F);
        --FIdx;
        --NumForms;
        Any = true;
      }
    }
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    if (EstimateSearchSpaceComplexity() < ComplexityLimit)
      break;
  }

  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
4879 /// The function delete formulas with high registers number expectation.
4880 /// Assuming we don't know the value of each formula (already delete
4881 /// all inefficient), generate probability of not selecting for each
4885 /// reg(a) + reg({0,+,1})
4886 /// reg(a) + reg({-1,+,1}) + 1
4889 /// reg(b) + reg({0,+,1})
4890 /// reg(b) + reg({-1,+,1}) + 1
4893 /// reg(c) + reg(b) + reg({0,+,1})
4894 /// reg(c) + reg({b,+,1})
4896 /// Probability of not selecting
4898 /// reg(a) (1/3) * 1 * 1
4899 /// reg(b) 1 * (1/3) * (1/2)
4900 /// reg({0,+,1}) (2/3) * (2/3) * (1/2)
4901 /// reg({-1,+,1}) (2/3) * (2/3) * 1
4902 /// reg({a,+,1}) (2/3) * 1 * 1
4903 /// reg({b,+,1}) 1 * (2/3) * (2/3)
4904 /// reg(c) 1 * 1 * 0
4906 /// Now count registers number mathematical expectation for each formula:
4907 /// Note that for each use we exclude probability if not selecting for the use.
4908 /// For example for Use1 probability for reg(a) would be just 1 * 1 (excluding
4909 /// probabilty 1/3 of not selecting for Use1).
4911 /// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted
4912 /// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted
4915 /// reg(b) + reg({0,+,1}) 1/2 + 1/3 -- to be deleted
4916 /// reg(b) + reg({-1,+,1}) + 1 1/2 + 2/3 -- to be deleted
4917 /// reg({b,+,1}) 2/3
4919 /// reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
4920 /// reg(c) + reg({b,+,1}) 1 + 2/3
void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;
  // Ok, we have too many formulae on our hands to conveniently handle.
  // Use a rough heuristic to thin out the list.

  // Set of Regs which will be 100% used in the final solution.
  // Used in each formula of a solution (in the example above this is reg(c)).
  // We can skip them in calculations.
  SmallPtrSet<const SCEV *, 4> UniqRegs;
  LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

  // Map each register to the probability of not selecting it.
  DenseMap<const SCEV *, float> RegNumMap;
  for (const SCEV *Reg : RegUses) {
    if (UniqRegs.count(Reg))
      continue;
    float PNotSel = 1;
    for (const LSRUse &LU : Uses) {
      if (!LU.Regs.count(Reg))
        continue;
      float P = LU.getNotSelectedProbability(Reg);
      if (P != 0.0)
        PNotSel *= P;
      else
        UniqRegs.insert(Reg);
    }
    RegNumMap.insert(std::make_pair(Reg, PNotSel));
  }

  LLVM_DEBUG(
      dbgs() << "Narrowing the search space by deleting costly formulas\n");

  // Delete formulas where the expected register count is high.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // If nothing to delete - continue.
    if (LU.Formulae.size() < 2)
      continue;
    // This is a temporary solution to test performance. Float should be
    // replaced with a rounding-independent type (based on integers) to avoid
    // different results for different target builds.
    float FMinRegNum = LU.Formulae[0].getNumRegs();
    float FMinARegNum = LU.Formulae[0].getNumRegs();
    size_t MinIdx = 0;
    for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
      Formula &F = LU.Formulae[i];
      float FRegNum = 0;
      float FARegNum = 0;
      for (const SCEV *BaseReg : F.BaseRegs) {
        if (UniqRegs.count(BaseReg))
          continue;
        FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
        if (isa<SCEVAddRecExpr>(BaseReg))
          FARegNum +=
              RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
      }
      if (const SCEV *ScaledReg = F.ScaledReg) {
        if (!UniqRegs.count(ScaledReg)) {
          FRegNum +=
              RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
          if (isa<SCEVAddRecExpr>(ScaledReg))
            FARegNum +=
                RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
        }
      }
      if (FMinRegNum > FRegNum ||
          (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
        FMinRegNum = FRegNum;
        FMinARegNum = FARegNum;
        MinIdx = i;
      }
    }
    LLVM_DEBUG(dbgs() << "  The formula "; LU.Formulae[MinIdx].print(dbgs());
               dbgs() << " with min reg num " << FMinRegNum << '\n');
    if (MinIdx != 0)
      std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
    while (LU.Formulae.size() != 1) {
      LLVM_DEBUG(dbgs() << "  Deleting "; LU.Formulae.back().print(dbgs());
                 dbgs() << '\n');
      LU.Formulae.pop_back();
    }
    LU.RecomputeRegs(LUIdx, RegUses);
    assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
    Formula &F = LU.Formulae[0];
    LLVM_DEBUG(dbgs() << "  Leaving only "; F.print(dbgs()); dbgs() << '\n');
    // When we choose the formula, the regs become unique.
    UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    if (F.ScaledReg)
      UniqRegs.insert(F.ScaledReg);
  }
  LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
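
// Illustrative sketch, not part of the pass and never compiled (guarded out):
// the expectation heuristic documented above, worked through on the three-use
// example from the comment. The names (`Use`, `notSelectedProbability`, ...)
// are invented for the sketch and do not correspond to LSR's data structures.
#if 0
#include <cstdio>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
  using Formula = std::set<std::string>; // registers referenced by a formula
  using Use = std::vector<Formula>;      // candidate formulae for one use

  std::vector<Use> Uses = {
      {{"a", "{0,+,1}"}, {"a", "{-1,+,1}"}, {"{a,+,1}"}},
      {{"b", "{0,+,1}"}, {"b", "{-1,+,1}"}, {"{b,+,1}"}},
      {{"c", "b", "{0,+,1}"}, {"c", "{b,+,1}"}}};

  // Probability that a register is *not* selected by a given use: the
  // fraction of that use's formulae which do not reference it.
  auto notSelectedProbability = [](const Use &U, const std::string &Reg) {
    unsigned NotUsing = 0;
    for (const Formula &F : U)
      NotUsing += !F.count(Reg);
    return double(NotUsing) / U.size();
  };

  // Overall probability of never selecting a register: product over all uses.
  std::map<std::string, double> PNotSel;
  for (const Use &U : Uses)
    for (const Formula &F : U)
      for (const std::string &Reg : F)
        PNotSel.emplace(Reg, 1.0);
  for (auto &[Reg, P] : PNotSel)
    for (const Use &U : Uses)
      P *= notSelectedProbability(U, Reg);

  // Expected register count of each formula: for each of its registers,
  // divide out the probability contributed by the use under consideration.
  for (std::size_t UI = 0; UI != Uses.size(); ++UI)
    for (const Formula &F : Uses[UI]) {
      double Expectation = 0.0;
      for (const std::string &Reg : F) {
        double POwn = notSelectedProbability(Uses[UI], Reg);
        // A register used by every formula of a use is certain to be
        // selected; count it as exactly one register, as in the table above.
        Expectation += (POwn == 0.0) ? 1.0 : PNotSel[Reg] / POwn;
      }
      std::printf("Use%zu formula expectation: %.3f\n", UI + 1, Expectation);
    }
  return 0;
}
#endif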
// Check if Best and Reg are SCEVs separated by a constant amount C, and if so
// whether the addressing offset +C would be legal where the negative offset -C
// is not.
static bool IsSimplerBaseSCEVForTarget(const TargetTransformInfo &TTI,
                                       ScalarEvolution &SE, const SCEV *Best,
                                       const SCEV *Reg,
                                       MemAccessTy AccessType) {
  if (Best->getType() != Reg->getType() ||
      (isa<SCEVAddRecExpr>(Best) && isa<SCEVAddRecExpr>(Reg) &&
       cast<SCEVAddRecExpr>(Best)->getLoop() !=
           cast<SCEVAddRecExpr>(Reg)->getLoop()))
    return false;
  const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Best, Reg));
  if (!Diff)
    return false;

  return TTI.isLegalAddressingMode(
             AccessType.MemTy, /*BaseGV=*/nullptr,
             /*BaseOffset=*/Diff->getAPInt().getSExtValue(),
             /*HasBaseReg=*/true, /*Scale=*/0, AccessType.AddrSpace) &&
         !TTI.isLegalAddressingMode(
             AccessType.MemTy, /*BaseGV=*/nullptr,
             /*BaseOffset=*/-Diff->getAPInt().getSExtValue(),
             /*HasBaseReg=*/true, /*Scale=*/0, AccessType.AddrSpace);
}
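
// For example (illustrative, target-dependent): with Best = {x+4,+,1} and
// Reg = {x,+,1}, Diff is +4. On a target whose addressing modes fold
// [reg + 4] but not [reg - 4], keeping Reg as the base lets the other
// accesses be expressed with the legal positive offset, so Reg is considered
// the simpler base SCEV and is preferred by the caller below.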
/// Pick a register which seems likely to be profitable, and then in any use
/// which has any reference to that register, delete all formulae which do not
/// reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    LLVM_DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = nullptr;
    unsigned BestNum = 0;
    for (const SCEV *Reg : RegUses) {
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }

        // If the scores are the same, but the Reg is simpler for the target
        // (for example {x,+,1} as opposed to {x+C,+,1}, where the target can
        // handle +C but not -C), opt for the simpler formula.
        if (Count == BestNum) {
          int LUIdx = RegUses.getUsedByIndices(Reg).find_first();
          if (LUIdx >= 0 && Uses[LUIdx].Kind == LSRUse::Address &&
              IsSimplerBaseSCEVForTarget(TTI, SE, Best, Reg,
                                         Uses[LUIdx].AccessTy)) {
            Best = Reg;
            BestNum = Count;
          }
        }
      }
    }
    assert(Best && "Failed to find best LSRUse candidate");
    LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                      << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          LLVM_DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
  }
}
/// If there are an extraordinary number of formulae to choose from, use some
/// rough heuristics to prune down the number of formulae. This keeps the main
/// solver from taking an extraordinary amount of time in some worst-case
/// scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  if (FilterSameScaledReg)
    NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  NarrowSearchSpaceByFilterPostInc();
  if (LSRExpNarrow)
    NarrowSearchSpaceByDeletingCostlyFormulas();
  else
    NarrowSearchSpaceByPickingWinnerRegs();
}
/// This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more aggressively
  //  - use more aggressive filtering
  //  - sort the formula so that the most profitable solutions are found first
  //  - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a cost
  //      and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (const SCEV *S : CurRegs)
    if (LU.Regs.count(S))
      ReqRegs.insert(S);

  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost(L, SE, TTI, AMK);
  for (const Formula &F : LU.Formulae) {
    // Ignore formulae which may not be ideal in terms of register reuse of
    // ReqRegs. The formula should use all required registers before
    // introducing new ones.
    // This can sometimes (notably when trying to favour postinc) lead to
    // sub-optimal decisions. There it is best left to the cost modelling to
    // get right.
    if (AMK != TTI::AMK_PostIndexed || LU.Kind != LSRUse::Address) {
      int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
      for (const SCEV *Reg : ReqRegs) {
        if ((F.ScaledReg && F.ScaledReg == Reg) ||
            is_contained(F.BaseRegs, Reg)) {
          --NumReqRegsToFind;
          if (NumReqRegsToFind == 0)
            break;
        }
      }
      if (NumReqRegsToFind != 0) {
        // If none of the formulae satisfied the required registers, then we could
        // clear ReqRegs and try again. Currently, we simply give up in this case.
        continue;
      }
    }

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, LU);
    if (NewCost.isLess(SolutionCost)) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
                   dbgs() << ".\nRegs:\n";
                   for (const SCEV *S : NewRegs) dbgs()
                      << "- " << *S << "\n";
                   dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  }
}
/// Choose one formula from each use. Return the results in the given Solution
/// vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost(L, SE, TTI, AMK);
  SolutionCost.Lose();
  Cost CurCost(L, SE, TTI, AMK);
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  LLVM_DEBUG(dbgs() << "\n"
                       "The chosen solution requires ";
             SolutionCost.print(dbgs()); dbgs() << ":\n";
             for (size_t i = 0, e = Uses.size(); i != e; ++i) {
               dbgs() << "  ";
               Uses[i].print(dbgs());
               dbgs() << "\n"
                         "    ";
               Solution[i]->print(dbgs());
               dbgs() << '\n';
             });

  assert(Solution.size() == Uses.size() && "Malformed solution!");

  if (BaselineCost.isLess(SolutionCost)) {
    LLVM_DEBUG(dbgs() << "The baseline solution requires ";
               BaselineCost.print(dbgs()); dbgs() << "\n");
    if (!AllowDropSolutionIfLessProfitable)
      LLVM_DEBUG(
          dbgs() << "Baseline is more profitable than chosen solution, "
                    "add option 'lsr-drop-solution' to drop LSR solution.\n");
    else {
      LLVM_DEBUG(dbgs() << "Baseline is more profitable than chosen "
                           "solution, dropping LSR solution.\n";);
      Solution.clear();
    }
  }
}
/// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as
/// far as we can go while still being dominated by the input positions. This
/// helps canonicalize the insert position, which encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
    const {
  Instruction *Tentative = &*IP;
  while (true) {
    bool AllDominate = true;
    Instruction *BetterPos = nullptr;
    // Don't bother attempting to insert before a catchswitch, their basic block
    // cannot have other non-PHI instructions.
    if (isa<CatchSwitchInst>(Tentative))
      return IP;

    for (Instruction *Inst : Inputs) {
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (Tentative->getParent() == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
        BetterPos = &*std::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos->getIterator();
    else
      IP = Tentative->getIterator();

    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    Tentative = IDom->getTerminator();
  }

  return IP;
}
/// Determine an input position which will be dominated by the operands and
/// which will dominate the result.
BasicBlock::iterator LSRInstance::AdjustInsertPositionForExpand(
    BasicBlock::iterator LowestIP, const LSRFixup &LF, const LSRUse &LU) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (const Loop *PIL : LF.PostIncLoops) {
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
         && !isa<DbgInfoIntrinsic>(LowestIP) &&
         "Insertion point must be a normal instruction");

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (IP->isEHPad()) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  // Set IP below instructions recently inserted by SCEVExpander. This keeps the
  // IP consistent across expansions and allows the previously inserted
  // instructions to be reused by subsequent expansion.
  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
    ++IP;

  return IP;
}
/// Emit instructions for the leading candidate expression for this LSRUse (this
/// is called "expanding").
Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
                           const Formula &F, BasicBlock::iterator IP,
                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);
  Rewriter.setInsertPoint(&*IP);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (const SCEV *Reg : F.BaseRegs) {
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = nullptr;
  if (F.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);

    if (LU.Kind == LSRUse::ICmpZero) {
      // Expand ScaleReg as if it was part of the base regs.
      if (F.Scale == 1)
        Ops.push_back(
            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
      else {
        // An interesting way of "folding" with an icmp is to use a negated
        // scale, which we'll implement by inserting it into the other operand
        // of the icmp.
        assert(F.Scale == -1 &&
               "The only scale supported by ICmpZero uses is -1!");
        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
      }
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.

      // Flush the operand list to suppress SCEVExpander hoisting address modes.
      // Unless the addressing mode will not be folded.
      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
          isAMCompletelyFolded(TTI, LU, F)) {
        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
        Ops.clear();
        Ops.push_back(SE.getUnknown(FullV));
      }
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
      if (F.Scale != 1)
        ScaledS =
            SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
      Ops.push_back(ScaledS);
    }
  }

  // Expand the GV portion.
  if (F.BaseGV) {
    // Flush the operand list to suppress SCEVExpander hoisting.
    if (!Ops.empty()) {
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), IntTy);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
    Ops.push_back(SE.getUnknown(F.BaseGV));
  }

  // Flush the operand list to suppress SCEVExpander hoisting of both folded and
  // unfolded offsets. LSR assumes they both live next to their uses.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1)))
      DeadInsts.emplace_back(OperandIsInstr);
    assert(!F.BaseGV && "ICmp does not support folding a global value and "
                        "a scale at the same time!");
    if (F.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      // A scale of 1 means that the scale has been expanded as part of the
      // base regs.
      assert((F.Scale == 0 || F.Scale == 1) &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy) {
        C = ConstantFoldCastOperand(
            CastInst::getCastOpcode(C, false, OpTy, false), C, OpTy,
            CI->getModule()->getDataLayout());
        assert(C && "Cast of ConstantInt should have folded");
      }

      CI->setOperand(1, C);
    }
  }

  return FullV;
}
/// Helper for Rewrite. PHI nodes are special because the use of their operands
/// effectively happens in their predecessor blocks, so the expression may need
/// to be expanded in multiple places.
void LSRInstance::RewriteForPHI(
    PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
    SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  DenseMap<BasicBlock *, Value *> Inserted;

  // Inserting instructions in the loop and using them as PHI's input could
  // break LCSSA in case the PHI's parent block is not a loop exit (i.e. the
  // corresponding incoming block is not loop exiting). So collect all such
  // instructions to form LCSSA for them later.
  SmallVector<Instruction *, 4> InsertedNonLCSSAInsts;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      bool needUpdateFixups = false;
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          !isa<CatchSwitchInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = nullptr;
          if (!Parent->isLandingPad()) {
            NewBB =
                SplitCriticalEdge(BB, Parent,
                                  CriticalEdgeSplittingOptions(&DT, &LI, MSSAU)
                                      .setMergeIdenticalEdges()
                                      .setKeepOneInputPHIs());
          } else {
            SmallVector<BasicBlock *, 2> NewBBs;
            DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
            SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DTU, &LI);
            NewBB = NewBBs[0];
          }
          // If NewBB==NULL, then SplitCriticalEdge refused to split because all
          // phi predecessors are identical. The simple thing to do is skip
          // splitting in this case rather than complicate the API.
          if (NewBB) {
            // If PN is outside of the loop and BB is in the loop, we want to
            // move the block to be immediately before the PHI block, not
            // immediately after BB.
            if (L->contains(BB) && !L->contains(PN))
              NewBB->moveBefore(PN->getParent());

            // Splitting the edge can reduce the number of PHI entries we have.
            e = PN->getNumIncomingValues();
            BB = NewBB;
            i = PN->getBasicBlockIndex(BB);

            needUpdateFixups = true;
          }
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV =
            Expand(LU, LF, F, BB->getTerminator()->getIterator(), DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        // If the incoming block for this value is not in the loop, it means the
        // current PHI is not in a loop exit, so we must create a LCSSA PHI for
        // the inserted value.
        if (auto *I = dyn_cast<Instruction>(FullV))
          if (L->contains(I) && !L->contains(BB))
            InsertedNonLCSSAInsts.push_back(I);

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }

      // If LSR splits critical edge and phi node has other pending
      // fixup operands, we need to update those pending fixups. Otherwise
      // formulae will not be implemented completely and some instructions
      // will not be eliminated.
      if (needUpdateFixups) {
        for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
          for (LSRFixup &Fixup : Uses[LUIdx].Fixups)
            // If fixup is supposed to rewrite some operand in the phi
            // that was just updated, it may be already moved to
            // another phi node. Such fixup requires update.
            if (Fixup.UserInst == PN) {
              // Check if the operand we try to replace still exists in the
              // original phi.
              bool foundInOriginalPHI = false;
              for (const auto &val : PN->incoming_values())
                if (val == Fixup.OperandValToReplace) {
                  foundInOriginalPHI = true;
                  break;
                }

              // If fixup operand found in original PHI - nothing to do.
              if (foundInOriginalPHI)
                continue;

              // Otherwise it might be moved to another PHI and requires update.
              // If fixup operand not found in any of the incoming blocks that
              // means we have already rewritten it - nothing to do.
              for (const auto &Block : PN->blocks())
                for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I);
                     ++I) {
                  PHINode *NewPN = cast<PHINode>(I);
                  for (const auto &val : NewPN->incoming_values())
                    if (val == Fixup.OperandValToReplace)
                      Fixup.UserInst = NewPN;
                }
            }
      }
    }

  formLCSSAForInstructions(InsertedNonLCSSAInsts, DT, LI, &SE);
}
/// Emit instructions for the leading candidate expression for this LSRUse (this
/// is called "expanding"), and update the UserInst to reference the newly
/// expanded value.
void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
                          const Formula &F,
                          SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LU, LF, F, DeadInsts);
  } else {
    Value *FullV = Expand(LU, LF, F, LF.UserInst->getIterator(), DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (LU.Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace))
    DeadInsts.emplace_back(OperandIsInstr);
}
// Try to hoist the IVInc to the loop header if all IVInc users are in
// the loop header. This helps the backend generate post-index load/store
// when the latch block is different from the loop header block.
static bool canHoistIVInc(const TargetTransformInfo &TTI, const LSRFixup &Fixup,
                          const LSRUse &LU, Instruction *IVIncInsertPos,
                          Loop *L) {
  if (LU.Kind != LSRUse::Address)
    return false;

  // For now this code does the conservative optimization and only works for
  // the header block. Later we can hoist the IVInc to the block that post-
  // dominates all users.
  BasicBlock *LHeader = L->getHeader();
  if (IVIncInsertPos->getParent() == LHeader)
    return false;

  if (!Fixup.OperandValToReplace ||
      any_of(Fixup.OperandValToReplace->users(), [&LHeader](User *U) {
        Instruction *UI = cast<Instruction>(U);
        return UI->getParent() != LHeader;
      }))
    return false;

  Instruction *I = Fixup.UserInst;
  Type *Ty = I->getType();
  return Ty->isIntegerTy() &&
         ((isa<LoadInst>(I) && TTI.isIndexedLoadLegal(TTI.MIM_PostInc, Ty)) ||
          (isa<StoreInst>(I) && TTI.isIndexedStoreLegal(TTI.MIM_PostInc, Ty)));
}
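
// Illustrative example (not taken from this file): in a rotated loop whose
// latch differs from the header,
//   header:  %p = phi ptr [ %base, %ph ], [ %p.next, %latch ]
//            %v = load i32, ptr %p
//            ...
//   latch:   %p.next = getelementptr i8, ptr %p, i64 4
//            br i1 %c, label %header, label %exit
// moving the pointer increment next to the load in the header lets a target
// with post-indexed addressing fold both into a single post-increment load.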
/// Rewrite all the fixup locations with new values, following the chosen
/// solution.
void LSRInstance::ImplementSolution(
    const SmallVectorImpl<const Formula *> &Solution) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakTrackingVH, 16> DeadInsts;

  // Mark phi nodes that terminate chains so the expander tries to reuse them.
  for (const IVChain &Chain : IVChainVec) {
    if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
      Rewriter.setChainedPhi(PN);
  }

  // Expand the new value definitions and update the users.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
    for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
      Instruction *InsertPos =
          canHoistIVInc(TTI, Fixup, Uses[LUIdx], IVIncInsertPos, L)
              ? L->getHeader()->getTerminator()
              : IVIncInsertPos;
      Rewriter.setIVIncInsertPos(L, InsertPos);
      Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], DeadInsts);
      Changed = true;
    }

  for (const IVChain &Chain : IVChainVec) {
    GenerateIVChain(Chain, DeadInsts);
    Changed = true;
  }

  for (const WeakVH &IV : Rewriter.getInsertedIVs())
    if (IV && dyn_cast<Instruction>(&*IV)->getParent())
      ScalarEvolutionIVs.push_back(IV);

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts,
                                                                  &TLI, MSSAU);

  // In our cost analysis above, we assume that each addrec consumes exactly
  // one register, and arrange to have increments inserted just before the
  // latch to maximize the chance this is true. However, if we reused
  // existing IVs, we now need to move the increments to match our
  // expectations. Otherwise, our cost modeling results in us having chosen
  // a non-optimal result for the actual schedule. (And yes, this
  // scheduling decision does impact later codegen.)
  for (PHINode &PN : L->getHeader()->phis()) {
    BinaryOperator *BO = nullptr;
    Value *Start = nullptr, *Step = nullptr;
    if (!matchSimpleRecurrence(&PN, BO, Start, Step))
      continue;

    switch (BO->getOpcode()) {
    case Instruction::Sub:
      if (BO->getOperand(0) != &PN)
        // sub is non-commutative - match handling elsewhere in LSR
        continue;
      break;
    case Instruction::Add:
      break;
    default:
      continue;
    }

    if (!isa<Constant>(Step))
      // If not a constant step, might increase register pressure
      // (We assume constants have been canonicalized to RHS)
      continue;

    if (BO->getParent() == IVIncInsertPos->getParent())
      // Only bother moving across blocks. Isel can handle block local case.
      continue;

    // Can we legally schedule inc at the desired point?
    if (!llvm::all_of(BO->uses(),
                      [&](Use &U) {return DT.dominates(IVIncInsertPos, U);}))
      continue;
    BO->moveBefore(IVIncInsertPos);
    Changed = true;
  }
}
LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                         DominatorTree &DT, LoopInfo &LI,
                         const TargetTransformInfo &TTI, AssumptionCache &AC,
                         TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
    : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
      MSSAU(MSSAU), AMK(PreferredAddresingMode.getNumOccurrences() > 0
                            ? PreferredAddresingMode
                            : TTI.getPreferredAddressingMode(L, &SE)),
      Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr", false),
      BaselineCost(L, SE, TTI, AMK) {
  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm())
    return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  // If there's too much analysis to be done, bail early. We won't be able to
  // model the problem anyway.
  unsigned NumUsers = 0;
  for (const IVStrideUse &U : IU) {
    if (++NumUsers > MaxIVUsers) {
      (void)U;
      LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
                        << "\n");
      return;
    }
    // Bail out if we have a PHI on an EHPad that gets a value from a
    // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is
    // no good place to stick any instructions.
    if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
      auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
      if (isa<FuncletPadInst>(FirstNonPHI) ||
          isa<CatchSwitchInst>(FirstNonPHI))
        for (BasicBlock *PredBB : PN->blocks())
          if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
            return;
    }
  }

  LLVM_DEBUG(dbgs() << "\nLSR on loop ";
             L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
             dbgs() << ":\n");

  // Configure SCEVExpander already now, so the correct mode is used for
  // isSafeToExpand() checks.
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!L->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  // If the number of registers is not the major cost, we cannot benefit from
  // the current profitable chain optimization which is based on the number of
  // registers.
  // FIXME: add profitable chain optimization for other kinds of major cost,
  // for example number of instructions.
  if (TTI.isNumRegsMajorCostOfLSR() || StressIVChain)
    CollectChains();
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
             print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (const LSRUse &LU : Uses) {
    for (const Formula &F : LU.Formulae)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        F) && "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (int64_t Factor : Factors) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << Factor;
  }

  for (Type *Ty : Types) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << *Ty << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (const LSRUse &LU : Uses)
    for (const LSRFixup &LF : LU.Fixups) {
      dbgs() << "  ";
      LF.print(OS);
      OS << '\n';
    }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (const LSRUse &LU : Uses) {
    dbgs() << "  ";
    LU.print(OS);
    OS << '\n';
    for (const Formula &F : LU.Formulae) {
      OS << "    ";
      F.print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

LLVM_DUMP_METHOD void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

class LoopStrengthReduce : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid

  LoopStrengthReduce();

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

} // end anonymous namespace

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsersWrapperPass>();
  AU.addPreserved<IVUsersWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  AU.addPreserved<MemorySSAWrapperPass>();
}
/// Enables more convenient iteration over a DWARF expression vector.
static iterator_range<llvm::DIExpression::expr_op_iterator>
ToDwarfOpIter(SmallVectorImpl<uint64_t> &Expr) {
  llvm::DIExpression::expr_op_iterator Begin =
      llvm::DIExpression::expr_op_iterator(Expr.begin());
  llvm::DIExpression::expr_op_iterator End =
      llvm::DIExpression::expr_op_iterator(Expr.end());
  return {Begin, End};
}

struct SCEVDbgValueBuilder {
  SCEVDbgValueBuilder() = default;
  SCEVDbgValueBuilder(const SCEVDbgValueBuilder &Base) { clone(Base); }

  void clone(const SCEVDbgValueBuilder &Base) {
    LocationOps = Base.LocationOps;
    Expr = Base.Expr;
  }

  void clear() {
    LocationOps.clear();
    Expr.clear();
  }

  /// The DIExpression as we translate the SCEV.
  SmallVector<uint64_t, 6> Expr;
  /// The location ops of the DIExpression.
  SmallVector<Value *, 2> LocationOps;

  void pushOperator(uint64_t Op) { Expr.push_back(Op); }
  void pushUInt(uint64_t Operand) { Expr.push_back(Operand); }

  /// Add a DW_OP_LLVM_arg to the expression, followed by the index of the value
  /// in the set of values referenced by the expression.
  void pushLocation(llvm::Value *V) {
    Expr.push_back(llvm::dwarf::DW_OP_LLVM_arg);
    auto *It = llvm::find(LocationOps, V);
    unsigned ArgIndex = 0;
    if (It != LocationOps.end()) {
      ArgIndex = std::distance(LocationOps.begin(), It);
    } else {
      ArgIndex = LocationOps.size();
      LocationOps.push_back(V);
    }
    Expr.push_back(ArgIndex);
  }

  void pushValue(const SCEVUnknown *U) {
    llvm::Value *V = cast<SCEVUnknown>(U)->getValue();
    pushLocation(V);
  }

  bool pushConst(const SCEVConstant *C) {
    if (C->getAPInt().getSignificantBits() > 64)
      return false;
    Expr.push_back(llvm::dwarf::DW_OP_consts);
    Expr.push_back(C->getAPInt().getSExtValue());
    return true;
  }

  // Iterating the expression as DWARF ops is convenient when updating
  // DWARF_OP_LLVM_args.
  iterator_range<llvm::DIExpression::expr_op_iterator> expr_ops() {
    return ToDwarfOpIter(Expr);
  }

  /// Several SCEV types are sequences of the same arithmetic operator applied
  /// to constants and values that may be extended or truncated.
  bool pushArithmeticExpr(const llvm::SCEVCommutativeExpr *CommExpr,
                          uint64_t DwarfOp) {
    assert((isa<llvm::SCEVAddExpr>(CommExpr) || isa<SCEVMulExpr>(CommExpr)) &&
           "Expected arithmetic SCEV type");
    bool Success = true;
    unsigned EmitOperator = 0;
    for (const auto &Op : CommExpr->operands()) {
      Success &= pushSCEV(Op);

      if (EmitOperator >= 1)
        pushOperator(DwarfOp);
      ++EmitOperator;
    }
    return Success;
  }

  // TODO: Identify and omit noop casts.
  bool pushCast(const llvm::SCEVCastExpr *C, bool IsSigned) {
    const llvm::SCEV *Inner = C->getOperand(0);
    const llvm::Type *Type = C->getType();
    uint64_t ToWidth = Type->getIntegerBitWidth();
    bool Success = pushSCEV(Inner);
    uint64_t CastOps[] = {dwarf::DW_OP_LLVM_convert, ToWidth,
                          IsSigned ? llvm::dwarf::DW_ATE_signed
                                   : llvm::dwarf::DW_ATE_unsigned};
    for (const auto &Op : CastOps)
      pushOperator(Op);
    return Success;
  }

  // TODO: MinMax - although these haven't been encountered in the test suite.
  bool pushSCEV(const llvm::SCEV *S) {
    bool Success = true;
    if (const SCEVConstant *StartInt = dyn_cast<SCEVConstant>(S)) {
      Success &= pushConst(StartInt);

    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!U->getValue())
        return false;
      pushLocation(U->getValue());

    } else if (const SCEVMulExpr *MulRec = dyn_cast<SCEVMulExpr>(S)) {
      Success &= pushArithmeticExpr(MulRec, llvm::dwarf::DW_OP_mul);

    } else if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
      Success &= pushSCEV(UDiv->getLHS());
      Success &= pushSCEV(UDiv->getRHS());
      pushOperator(llvm::dwarf::DW_OP_div);

    } else if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(S)) {
      // Assert if a new and unknown SCEVCastExpr type is encountered.
      assert((isa<SCEVZeroExtendExpr>(Cast) || isa<SCEVTruncateExpr>(Cast) ||
              isa<SCEVPtrToIntExpr>(Cast) || isa<SCEVSignExtendExpr>(Cast)) &&
             "Unexpected cast type in SCEV.");
      Success &= pushCast(Cast, (isa<SCEVSignExtendExpr>(Cast)));

    } else if (const SCEVAddExpr *AddExpr = dyn_cast<SCEVAddExpr>(S)) {
      Success &= pushArithmeticExpr(AddExpr, llvm::dwarf::DW_OP_plus);

    } else if (isa<SCEVAddRecExpr>(S)) {
      // Nested SCEVAddRecExpr are generated by nested loops and are currently
      // unsupported.
      return false;
    } else {
      return false;
    }
    return Success;
  }

  /// Return true if the combination of arithmetic operator and underlying
  /// SCEV constant value is an identity function.
  bool isIdentityFunction(uint64_t Op, const SCEV *S) {
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
      if (C->getAPInt().getSignificantBits() > 64)
        return false;
      int64_t I = C->getAPInt().getSExtValue();
      switch (Op) {
      case llvm::dwarf::DW_OP_plus:
      case llvm::dwarf::DW_OP_minus:
        return I == 0;
      case llvm::dwarf::DW_OP_mul:
      case llvm::dwarf::DW_OP_div:
        return I == 1;
      }
    }
    return false;
  }

  /// Convert a SCEV of a value to a DIExpression that is pushed onto the
  /// builder's expression stack. The stack should already contain an
  /// expression for the iteration count, so that it can be multiplied by
  /// the stride and added to the start.
  /// Components of the expression are omitted if they are an identity function.
  /// Chain (non-affine) SCEVs are not supported.
  bool SCEVToValueExpr(const llvm::SCEVAddRecExpr &SAR, ScalarEvolution &SE) {
    assert(SAR.isAffine() && "Expected affine SCEV");
    // TODO: Is this check needed?
    if (isa<SCEVAddRecExpr>(SAR.getStart()))
      return false;

    const SCEV *Start = SAR.getStart();
    const SCEV *Stride = SAR.getStepRecurrence(SE);

    // Skip pushing arithmetic noops.
    if (!isIdentityFunction(llvm::dwarf::DW_OP_mul, Stride)) {
      if (!pushSCEV(Stride))
        return false;
      pushOperator(llvm::dwarf::DW_OP_mul);
    }
    if (!isIdentityFunction(llvm::dwarf::DW_OP_plus, Start)) {
      if (!pushSCEV(Start))
        return false;
      pushOperator(llvm::dwarf::DW_OP_plus);
    }
    return true;
  }
  /// Create an expression that is an offset from a value (usually the IV).
  void createOffsetExpr(int64_t Offset, Value *OffsetValue) {
    pushLocation(OffsetValue);
    DIExpression::appendOffset(Expr, Offset);
    LLVM_DEBUG(
        dbgs() << "scev-salvage: Generated IV offset expression. Offset: "
               << std::to_string(Offset) << "\n");
  }

  /// Combine a translation of the SCEV and the IV to create an expression that
  /// recovers a location's value.
  /// Returns true if an expression was created.
  bool createIterCountExpr(const SCEV *S,
                           const SCEVDbgValueBuilder &IterationCount,
                           ScalarEvolution &SE) {
    // SCEVs for SSA values are most frequently of the form
    // {start,+,stride}, but sometimes they are ({start,+,stride} + %a + ..).
    // This is because %a is a PHI node that is not the IV. However, these
    // SCEVs have not been observed to result in debuginfo-lossy optimisations,
    // so it's not expected this point will be reached.
    if (!isa<SCEVAddRecExpr>(S))
      return false;

    LLVM_DEBUG(dbgs() << "scev-salvage: Location to salvage SCEV: " << *S
                      << '\n');

    const auto *Rec = cast<SCEVAddRecExpr>(S);
    if (!Rec->isAffine())
      return false;

    if (S->getExpressionSize() > MaxSCEVSalvageExpressionSize)
      return false;

    // Initialise a new builder with the iteration count expression. In
    // combination with the value's SCEV this enables recovery.
    clone(IterationCount);
    if (!SCEVToValueExpr(*Rec, SE))
      return false;

    return true;
  }

  /// Convert the SCEV of the induction variable to a DIExpression for the
  /// iteration count that is pushed onto the builder's expression stack:
  /// the start is subtracted from the IV and the result divided by the stride.
  /// Components of the expression are omitted if they are an identity function.
  bool SCEVToIterCountExpr(const llvm::SCEVAddRecExpr &SAR,
                           ScalarEvolution &SE) {
    assert(SAR.isAffine() && "Expected affine SCEV");
    if (isa<SCEVAddRecExpr>(SAR.getStart())) {
      LLVM_DEBUG(dbgs() << "scev-salvage: IV SCEV. Unsupported nested AddRec: "
                        << SAR << '\n');
      return false;
    }
    const SCEV *Start = SAR.getStart();
    const SCEV *Stride = SAR.getStepRecurrence(SE);

    // Skip pushing arithmetic noops.
    if (!isIdentityFunction(llvm::dwarf::DW_OP_minus, Start)) {
      if (!pushSCEV(Start))
        return false;
      pushOperator(llvm::dwarf::DW_OP_minus);
    }
    if (!isIdentityFunction(llvm::dwarf::DW_OP_div, Stride)) {
      if (!pushSCEV(Stride))
        return false;
      pushOperator(llvm::dwarf::DW_OP_div);
    }
    return true;
  }

  // Append the current expression and locations to a location list and an
  // expression list. Modify the DW_OP_LLVM_arg indexes to account for
  // the locations already present in the destination list.
  void appendToVectors(SmallVectorImpl<uint64_t> &DestExpr,
                       SmallVectorImpl<Value *> &DestLocations) {
    assert(!DestLocations.empty() &&
           "Expected the locations vector to contain the IV");
    // The DWARF_OP_LLVM_arg arguments of the expression being appended must be
    // modified to account for the locations already in the destination vector.
    // All builders contain the IV as the first location op.
    assert(!LocationOps.empty() &&
           "Expected the location ops to contain the IV.");
    // DestIndexMap[n] contains the index in DestLocations for the nth
    // location in this SCEVDbgValueBuilder.
    SmallVector<uint64_t, 2> DestIndexMap;
    for (const auto &Op : LocationOps) {
      auto It = find(DestLocations, Op);
      if (It != DestLocations.end()) {
        // Location already exists in DestLocations, reuse existing ArgIndex.
        DestIndexMap.push_back(std::distance(DestLocations.begin(), It));
        continue;
      }
      // Location is not in DestLocations, add it.
      DestIndexMap.push_back(DestLocations.size());
      DestLocations.push_back(Op);
    }

    for (const auto &Op : expr_ops()) {
      if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
        Op.appendToVector(DestExpr);
        continue;
      }

      DestExpr.push_back(dwarf::DW_OP_LLVM_arg);
      // `DW_OP_LLVM_arg n` represents the nth LocationOp in this SCEV,
      // DestIndexMap[n] contains its new index in DestLocations.
      uint64_t NewIndex = DestIndexMap[Op.getArg(0)];
      DestExpr.push_back(NewIndex);
    }
  }
};
/// Holds all the required data to salvage a dbg.value using the pre-LSR SCEVs
/// and DIExpression.
struct DVIRecoveryRec {
  DVIRecoveryRec(DbgValueInst *DbgValue)
      : DbgRef(DbgValue), Expr(DbgValue->getExpression()),
        HadLocationArgList(false) {}
  DVIRecoveryRec(DPValue *DPV)
      : DbgRef(DPV), Expr(DPV->getExpression()), HadLocationArgList(false) {}

  PointerUnion<DbgValueInst *, DPValue *> DbgRef;
  DIExpression *Expr;
  bool HadLocationArgList;
  SmallVector<WeakVH, 2> LocationOps;
  SmallVector<const llvm::SCEV *, 2> SCEVs;
  SmallVector<std::unique_ptr<SCEVDbgValueBuilder>, 2> RecoveryExprs;

  void clear() {
    for (auto &RE : RecoveryExprs)
      RE.reset();
    RecoveryExprs.clear();
  }

  ~DVIRecoveryRec() { clear(); }
};

/// Returns the total number of DW_OP_llvm_arg operands in the expression.
/// This helps in determining if a DIArglist is necessary or can be omitted from
/// the dbg.value.
static unsigned numLLVMArgOps(SmallVectorImpl<uint64_t> &Expr) {
  auto expr_ops = ToDwarfOpIter(Expr);
  unsigned Count = 0;
  for (auto Op : expr_ops)
    if (Op.getOp() == dwarf::DW_OP_LLVM_arg)
      Count++;
  return Count;
}

/// Overwrites DVI with the location and Ops as the DIExpression. This will
/// create an invalid expression if Ops has any dwarf::DW_OP_llvm_arg operands,
/// because a DIArglist is not created for the first argument of the dbg.value.
template <typename T>
static void updateDVIWithLocation(T &DbgVal, Value *Location,
                                  SmallVectorImpl<uint64_t> &Ops) {
  assert(numLLVMArgOps(Ops) == 0 && "Expected expression that does not "
                                    "contain any DW_OP_llvm_arg operands.");
  DbgVal.setRawLocation(ValueAsMetadata::get(Location));
  DbgVal.setExpression(DIExpression::get(DbgVal.getContext(), Ops));
}
/// Overwrite DVI with locations placed into a DIArglist.
template <typename T>
static void updateDVIWithLocations(T &DbgVal,
                                   SmallVectorImpl<Value *> &Locations,
                                   SmallVectorImpl<uint64_t> &Ops) {
  assert(numLLVMArgOps(Ops) != 0 &&
         "Expected expression that references DIArglist locations using "
         "DW_OP_llvm_arg operands.");
  SmallVector<ValueAsMetadata *, 3> MetadataLocs;
  for (Value *V : Locations)
    MetadataLocs.push_back(ValueAsMetadata::get(V));
  auto ValArrayRef = llvm::ArrayRef<llvm::ValueAsMetadata *>(MetadataLocs);
  DbgVal.setRawLocation(llvm::DIArgList::get(DbgVal.getContext(), ValArrayRef));
  DbgVal.setExpression(DIExpression::get(DbgVal.getContext(), Ops));
}

/// Write the new expression and new location ops for the dbg.value. If possible
/// reduce the size of the dbg.value intrinsic by omitting DIArglist. This
/// can be omitted if:
/// 1. There is only a single location, referenced by a single DW_OP_llvm_arg.
/// 2. The DW_OP_LLVM_arg is the first operand in the expression.
static void UpdateDbgValueInst(DVIRecoveryRec &DVIRec,
                               SmallVectorImpl<Value *> &NewLocationOps,
                               SmallVectorImpl<uint64_t> &NewExpr) {
  auto UpdateDbgValueInstImpl = [&](auto *DbgVal) {
    unsigned NumLLVMArgs = numLLVMArgOps(NewExpr);
    if (NumLLVMArgs == 0) {
      // Location assumed to be on the stack.
      updateDVIWithLocation(*DbgVal, NewLocationOps[0], NewExpr);
    } else if (NumLLVMArgs == 1 && NewExpr[0] == dwarf::DW_OP_LLVM_arg) {
      // There is only a single DW_OP_llvm_arg at the start of the expression,
      // so it can be omitted along with DIArglist.
      assert(NewExpr[1] == 0 &&
             "Lone LLVM_arg in a DIExpression should refer to location-op 0.");
      llvm::SmallVector<uint64_t, 6> ShortenedOps(llvm::drop_begin(NewExpr, 2));
      updateDVIWithLocation(*DbgVal, NewLocationOps[0], ShortenedOps);
    } else {
      // Multiple DW_OP_llvm_arg, so DIArgList is strictly necessary.
      updateDVIWithLocations(*DbgVal, NewLocationOps, NewExpr);
    }

    // If the DIExpression was previously empty then add the stack terminator.
    // Non-empty expressions have only had elements inserted into them and so
    // the terminator should already be present e.g. stack_value or fragment.
    DIExpression *SalvageExpr = DbgVal->getExpression();
    if (!DVIRec.Expr->isComplex() && SalvageExpr->isComplex()) {
      SalvageExpr =
          DIExpression::append(SalvageExpr, {dwarf::DW_OP_stack_value});
      DbgVal->setExpression(SalvageExpr);
    }
  };
  if (isa<DbgValueInst *>(DVIRec.DbgRef))
    UpdateDbgValueInstImpl(cast<DbgValueInst *>(DVIRec.DbgRef));
  else
    UpdateDbgValueInstImpl(cast<DPValue *>(DVIRec.DbgRef));
}
/// Cached location ops may be erased during LSR, in which case a poison is
/// required when restoring from the cache. The type of that location is no
/// longer available, so just use int8. The poison will be replaced by one or
/// more locations later when a SCEVDbgValueBuilder selects alternative
/// locations to use for the salvage.
static Value *getValueOrPoison(WeakVH &VH, LLVMContext &C) {
  return (VH) ? VH : PoisonValue::get(llvm::Type::getInt8Ty(C));
}

/// Restore the DVI's pre-LSR arguments. Substitute undef for any erased values.
static void restorePreTransformState(DVIRecoveryRec &DVIRec) {
  auto RestorePreTransformStateImpl = [&](auto *DbgVal) {
    LLVM_DEBUG(dbgs() << "scev-salvage: restore dbg.value to pre-LSR state\n"
                      << "scev-salvage: post-LSR: " << *DbgVal << '\n');
    assert(DVIRec.Expr && "Expected an expression");
    DbgVal->setExpression(DVIRec.Expr);

    // Even a single location-op may be inside a DIArgList and referenced with
    // DW_OP_LLVM_arg, which is valid only with a DIArgList.
    if (!DVIRec.HadLocationArgList) {
      assert(DVIRec.LocationOps.size() == 1 &&
             "Unexpected number of location ops.");
      // LSR's unsuccessful salvage attempt may have added DIArgList, which in
      // this case was not present before, so force the location back to a
      // single uncontained Value.
      Value *CachedValue =
          getValueOrPoison(DVIRec.LocationOps[0], DbgVal->getContext());
      DbgVal->setRawLocation(ValueAsMetadata::get(CachedValue));
    } else {
      SmallVector<ValueAsMetadata *, 3> MetadataLocs;
      for (WeakVH VH : DVIRec.LocationOps) {
        Value *CachedValue = getValueOrPoison(VH, DbgVal->getContext());
        MetadataLocs.push_back(ValueAsMetadata::get(CachedValue));
      }
      auto ValArrayRef = llvm::ArrayRef<llvm::ValueAsMetadata *>(MetadataLocs);
      DbgVal->setRawLocation(
          llvm::DIArgList::get(DbgVal->getContext(), ValArrayRef));
    }
    LLVM_DEBUG(dbgs() << "scev-salvage: pre-LSR: " << *DbgVal << '\n');
  };
  if (isa<DbgValueInst *>(DVIRec.DbgRef))
    RestorePreTransformStateImpl(cast<DbgValueInst *>(DVIRec.DbgRef));
  else
    RestorePreTransformStateImpl(cast<DPValue *>(DVIRec.DbgRef));
}
static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
                       llvm::PHINode *LSRInductionVar, DVIRecoveryRec &DVIRec,
                       const SCEV *SCEVInductionVar,
                       SCEVDbgValueBuilder IterCountExpr) {
  if (isa<DbgValueInst *>(DVIRec.DbgRef)
          ? !cast<DbgValueInst *>(DVIRec.DbgRef)->isKillLocation()
          : !cast<DPValue *>(DVIRec.DbgRef)->isKillLocation())
    return false;

  // LSR may have caused several changes to the dbg.value in the failed salvage
  // attempt. So restore the DIExpression, the location ops and also the
  // location ops format, which is always DIArglist for multiple ops, but only
  // sometimes for a single op.
  restorePreTransformState(DVIRec);

  // LocationOpIndexMap[i] will store the post-LSR location index of
  // the non-optimised out location at pre-LSR index i.
  SmallVector<int64_t, 2> LocationOpIndexMap;
  LocationOpIndexMap.assign(DVIRec.LocationOps.size(), -1);
  SmallVector<Value *, 2> NewLocationOps;
  NewLocationOps.push_back(LSRInductionVar);

  for (unsigned i = 0; i < DVIRec.LocationOps.size(); i++) {
    WeakVH VH = DVIRec.LocationOps[i];
    // Place the locations not optimised out in the list first, avoiding
    // inserts later. The map is used to update the DIExpression's
    // DW_OP_LLVM_arg arguments as the expression is updated.
    if (VH && !isa<UndefValue>(VH)) {
      NewLocationOps.push_back(VH);
      LocationOpIndexMap[i] = NewLocationOps.size() - 1;
      LLVM_DEBUG(dbgs() << "scev-salvage: Location index " << i
                        << " now at index " << LocationOpIndexMap[i] << "\n");
      continue;
    }

    // It's possible that a value referred to in the SCEV may have been
    // optimised out by LSR.
    if (SE.containsErasedValue(DVIRec.SCEVs[i]) ||
        SE.containsUndefs(DVIRec.SCEVs[i])) {
      LLVM_DEBUG(dbgs() << "scev-salvage: SCEV for location at index: " << i
                        << " refers to a location that is now undef or erased. "
                           "Salvage abandoned.\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "scev-salvage: salvaging location at index " << i
                      << " with SCEV: " << *DVIRec.SCEVs[i] << "\n");

    DVIRec.RecoveryExprs[i] = std::make_unique<SCEVDbgValueBuilder>();
    SCEVDbgValueBuilder *SalvageExpr = DVIRec.RecoveryExprs[i].get();

    // Create an offset-based salvage expression if possible, as it requires
    // less DWARF ops than an iteration count-based expression.
    if (std::optional<APInt> Offset =
            SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) {
      if (Offset->getSignificantBits() <= 64)
        SalvageExpr->createOffsetExpr(Offset->getSExtValue(), LSRInductionVar);
    } else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
                                                 SE))
      return false;
  }

  // Merge the DbgValueBuilder generated expressions and the original
  // DIExpression, place the result into a new vector.
  SmallVector<uint64_t, 3> NewExpr;
  if (DVIRec.Expr->getNumElements() == 0) {
    assert(DVIRec.RecoveryExprs.size() == 1 &&
           "Expected only a single recovery expression for an empty "
           "DIExpression.");
    assert(DVIRec.RecoveryExprs[0] &&
           "Expected a SCEVDbgSalvageBuilder for location 0");
    SCEVDbgValueBuilder *B = DVIRec.RecoveryExprs[0].get();
    B->appendToVectors(NewExpr, NewLocationOps);
  }
  for (const auto &Op : DVIRec.Expr->expr_ops()) {
    // Most Ops needn't be updated.
    if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
      Op.appendToVector(NewExpr);
      continue;
    }

    uint64_t LocationArgIndex = Op.getArg(0);
    SCEVDbgValueBuilder *DbgBuilder =
        DVIRec.RecoveryExprs[LocationArgIndex].get();
    // The location doesn't have a SCEVDbgValueBuilder, so LSR did not
    // optimise it away. So just translate the argument to the updated
    // location index.
    if (!DbgBuilder) {
      NewExpr.push_back(dwarf::DW_OP_LLVM_arg);
      assert(LocationOpIndexMap[Op.getArg(0)] != -1 &&
             "Expected a positive index for the location-op position.");
      NewExpr.push_back(LocationOpIndexMap[Op.getArg(0)]);
      continue;
    }
    // The location has a recovery expression.
    DbgBuilder->appendToVectors(NewExpr, NewLocationOps);
  }

  UpdateDbgValueInst(DVIRec, NewLocationOps, NewExpr);
  if (isa<DbgValueInst *>(DVIRec.DbgRef))
    LLVM_DEBUG(dbgs() << "scev-salvage: Updated DVI: "
                      << *cast<DbgValueInst *>(DVIRec.DbgRef) << "\n");
  else
    LLVM_DEBUG(dbgs() << "scev-salvage: Updated DVI: "
                      << *cast<DPValue *>(DVIRec.DbgRef) << "\n");
  return true;
}
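// A rough sketch of the two salvage strategies used above (hypothetical SCEVs,
// not taken from a test case): if a cached location has SCEV {4,+,1}<%loop>
// and the surviving LSR induction variable has SCEV {0,+,1}<%loop>, then
// computeConstantDifference returns 4 and the location can be described as a
// constant offset from the new IV (e.g. a DW_OP_plus_uconst 4 appended to the
// DIExpression). If the SCEVs do not differ by a constant, the builder instead
// falls back to an expression phrased in terms of the loop's iteration count
// via createIterCountExpr.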
/// Obtain an expression for the iteration count, then attempt to salvage the
/// dbg.value intrinsics.
static void DbgRewriteSalvageableDVIs(
    llvm::Loop *L, ScalarEvolution &SE, llvm::PHINode *LSRInductionVar,
    SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> &DVIToUpdate) {
  if (DVIToUpdate.empty())
    return;

  const llvm::SCEV *SCEVInductionVar = SE.getSCEV(LSRInductionVar);
  assert(SCEVInductionVar &&
         "Anticipated a SCEV for the post-LSR induction variable");

  if (const SCEVAddRecExpr *IVAddRec =
          dyn_cast<SCEVAddRecExpr>(SCEVInductionVar)) {
    if (!IVAddRec->isAffine())
      return;

    // Prevent translation using excessive resources.
    if (IVAddRec->getExpressionSize() > MaxSCEVSalvageExpressionSize)
      return;

    // The iteration count is required to recover location values.
    SCEVDbgValueBuilder IterCountExpr;
    IterCountExpr.pushLocation(LSRInductionVar);
    if (!IterCountExpr.SCEVToIterCountExpr(*IVAddRec, SE))
      return;

    LLVM_DEBUG(dbgs() << "scev-salvage: IV SCEV: " << *SCEVInductionVar
                      << '\n');

    for (auto &DVIRec : DVIToUpdate) {
      SalvageDVI(L, SE, LSRInductionVar, *DVIRec, SCEVInductionVar,
                 IterCountExpr);
    }
  }
}
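// Sketch of the recovery above (assumed, for illustration only): for a
// surviving affine IV with SCEV {Start,+,Step}<%loop>, the current iteration
// count can be expressed as
//
//   IterCount = (IV - Start) / Step
//
// and a salvaged location whose pre-LSR SCEV was {S0,+,S1}<%loop> can then be
// rebuilt as S0 + S1 * IterCount, which is what SCEVToIterCountExpr and
// createIterCountExpr encode as DWARF expression ops.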
/// Identify and cache salvageable DVI locations and expressions along with the
/// corresponding SCEV(s). Also ensure that the DVI is not deleted between
/// caching and salvaging.
static void DbgGatherSalvagableDVI(
    Loop *L, ScalarEvolution &SE,
    SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> &SalvageableDVISCEVs,
    SmallSet<AssertingVH<DbgValueInst>, 2> &DVIHandles) {
  for (const auto &B : L->getBlocks()) {
    for (auto &I : *B) {
      auto ProcessDbgValue = [&](auto *DbgVal) -> bool {
        // Ensure that if any location op is undef that the dbg.value is not
        // cached.
        if (DbgVal->isKillLocation())
          return false;

        // Check that the location op SCEVs are suitable for translation to
        // DIExpression.
        const auto &HasTranslatableLocationOps =
            [&](const auto *DbgValToTranslate) -> bool {
          for (const auto LocOp : DbgValToTranslate->location_ops()) {
            if (!LocOp)
              return false;

            if (!SE.isSCEVable(LocOp->getType()))
              return false;

            const SCEV *S = SE.getSCEV(LocOp);
            if (SE.containsUndefs(S))
              return false;
          }
          return true;
        };

        if (!HasTranslatableLocationOps(DbgVal))
          return false;

        std::unique_ptr<DVIRecoveryRec> NewRec =
            std::make_unique<DVIRecoveryRec>(DbgVal);
        // Each location Op may need a SCEVDbgValueBuilder in order to recover
        // it. Pre-allocating a vector will enable quick lookups of the builder
        // later during the salvage.
        NewRec->RecoveryExprs.resize(DbgVal->getNumVariableLocationOps());
        for (const auto LocOp : DbgVal->location_ops()) {
          NewRec->SCEVs.push_back(SE.getSCEV(LocOp));
          NewRec->LocationOps.push_back(LocOp);
          NewRec->HadLocationArgList = DbgVal->hasArgList();
        }
        SalvageableDVISCEVs.push_back(std::move(NewRec));
        return true;
      };
      for (auto &DPV : I.getDbgValueRange()) {
        if (DPV.isDbgValue() || DPV.isDbgAssign())
          ProcessDbgValue(&DPV);
      }
      auto DVI = dyn_cast<DbgValueInst>(&I);
      if (!DVI)
        continue;
      if (ProcessDbgValue(DVI))
        DVIHandles.insert(DVI);
    }
  }
}
/// Ideally pick the PHI IV inserted by ScalarEvolutionExpander. As a fallback
/// any PHI from the loop header is usable, but may have less chance of
/// surviving subsequent transforms.
static llvm::PHINode *GetInductionVariable(const Loop &L, ScalarEvolution &SE,
                                           const LSRInstance &LSR) {
  auto IsSuitableIV = [&](PHINode *P) {
    if (!SE.isSCEVable(P->getType()))
      return false;
    if (const SCEVAddRecExpr *Rec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(P)))
      return Rec->isAffine() && !SE.containsUndefs(SE.getSCEV(P));
    return false;
  };

  // For now, just pick the first IV that was generated and inserted by
  // ScalarEvolution. Ideally pick an IV that is unlikely to be optimised away
  // by subsequent transforms.
  for (const WeakVH &IV : LSR.getScalarEvolutionIVs()) {
    if (!IV)
      continue;

    // There should only be PHI node IVs.
    PHINode *P = cast<PHINode>(&*IV);

    if (IsSuitableIV(P))
      return P;
  }

  for (PHINode &P : L.getHeader()->phis()) {
    if (IsSuitableIV(&P))
      return &P;
  }
  return nullptr;
}
static std::optional<std::tuple<PHINode *, PHINode *, const SCEV *, bool>>
canFoldTermCondOfLoop(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
                      const LoopInfo &LI) {
  if (!L->isInnermost()) {
    LLVM_DEBUG(dbgs() << "Cannot fold on non-innermost loop\n");
    return std::nullopt;
  }
  // Only inspect loops in loop-simplify form.
  if (!L->isLoopSimplifyForm()) {
    LLVM_DEBUG(dbgs() << "Cannot fold on non-simple loop\n");
    return std::nullopt;
  }

  if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
    LLVM_DEBUG(dbgs() << "Cannot fold on backedge that is loop variant\n");
    return std::nullopt;
  }

  BasicBlock *LoopLatch = L->getLoopLatch();
  BranchInst *BI = dyn_cast<BranchInst>(LoopLatch->getTerminator());
  if (!BI || BI->isUnconditional())
    return std::nullopt;
  auto *TermCond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!TermCond) {
    LLVM_DEBUG(
        dbgs() << "Cannot fold on branching condition that is not an ICmpInst");
    return std::nullopt;
  }
  if (!TermCond->hasOneUse()) {
    LLVM_DEBUG(
        dbgs()
        << "Cannot replace terminating condition with more than one use\n");
    return std::nullopt;
  }

  BinaryOperator *LHS = dyn_cast<BinaryOperator>(TermCond->getOperand(0));
  Value *RHS = TermCond->getOperand(1);
  if (!LHS || !L->isLoopInvariant(RHS))
    // We could pattern match the inverse form of the icmp, but that is
    // non-canonical, and this pass is running *very* late in the pipeline.
    return std::nullopt;

  // Find the IV used by the current exit condition.
  PHINode *ToFold;
  Value *ToFoldStart, *ToFoldStep;
  if (!matchSimpleRecurrence(LHS, ToFold, ToFoldStart, ToFoldStep))
    return std::nullopt;

  // If that IV isn't dead after we rewrite the exit condition in terms of
  // another IV, there's no point in doing the transform.
  if (!isAlmostDeadIV(ToFold, LoopLatch, TermCond))
    return std::nullopt;

  const SCEV *BECount = SE.getBackedgeTakenCount(L);
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  SCEVExpander Expander(SE, DL, "lsr_fold_term_cond");

  PHINode *ToHelpFold = nullptr;
  const SCEV *TermValueS = nullptr;
  bool MustDropPoison = false;
  for (PHINode &PN : L->getHeader()->phis()) {
    if (ToFold == &PN)
      continue;

    if (!SE.isSCEVable(PN.getType())) {
      LLVM_DEBUG(dbgs() << "IV of phi '" << PN
                        << "' is not SCEV-able, not qualified for the "
                           "terminating condition folding.\n");
      continue;
    }

    const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
    // Only speculate on affine AddRecs.
    if (!AddRec || !AddRec->isAffine()) {
      LLVM_DEBUG(dbgs() << "SCEV of phi '" << PN
                        << "' is not an affine add recurrence, not qualified "
                           "for the terminating condition folding.\n");
      continue;
    }

    // Check that we can compute the value of AddRec on the exiting iteration
    // without soundness problems. evaluateAtIteration internally needs
    // to multiply the stride of the iteration number - which may wrap around.
    // The issue here is subtle because computing the result accounting for
    // wrap is insufficient. In order to use the result in an exit test, we
    // must also know that AddRec doesn't take the same value on any previous
    // iteration. The simplest case to consider is a candidate IV which is
    // narrower than the trip count (and thus original IV), but this can
    // also happen due to non-unit strides on the candidate IVs.
    if (!AddRec->hasNoSelfWrap() ||
        !SE.isKnownNonZero(AddRec->getStepRecurrence(SE)))
      continue;

    const SCEVAddRecExpr *PostInc = AddRec->getPostIncExpr(SE);
    const SCEV *TermValueSLocal = PostInc->evaluateAtIteration(BECount, SE);
    if (!Expander.isSafeToExpand(TermValueSLocal)) {
      LLVM_DEBUG(
          dbgs() << "Not safe to expand terminating value for phi node " << PN
                 << "\n");
      continue;
    }

    // The candidate IV may have been otherwise dead and poison from the
    // very first iteration. If we can't disprove that, we can't use the IV.
    if (!mustExecuteUBIfPoisonOnPathTo(&PN, LoopLatch->getTerminator(), &DT)) {
      LLVM_DEBUG(dbgs() << "Cannot prove poison safety for IV " << PN << "\n");
      continue;
    }

    // The candidate IV may become poison on the last iteration. If this
    // value is not branched on, this is a well defined program. We're
    // about to add a new use to this IV, and we have to ensure we don't
    // insert UB which didn't previously exist.
    bool MustDropPoisonLocal = false;
    Instruction *PostIncV =
        cast<Instruction>(PN.getIncomingValueForBlock(LoopLatch));
    if (!mustExecuteUBIfPoisonOnPathTo(PostIncV, LoopLatch->getTerminator(),
                                       &DT)) {
      LLVM_DEBUG(dbgs() << "Cannot prove poison safety to insert use " << PN
                        << "\n");

      // If this is a complex recurrence with multiple instructions computing
      // the backedge value, we might need to strip poison flags from all of
      // them.
      if (PostIncV->getOperand(0) != &PN)
        continue;

      // In order to perform the transform, we need to drop the poison
      // generating flags on this instruction (if any).
      MustDropPoisonLocal = PostIncV->hasPoisonGeneratingFlags();
    }

    // We pick the last legal alternate IV. We could explore choosing an
    // optimal alternate IV if we had a decent heuristic to do so.
    ToHelpFold = &PN;
    TermValueS = TermValueSLocal;
    MustDropPoison = MustDropPoisonLocal;
  }

  LLVM_DEBUG(if (ToFold && !ToHelpFold) dbgs()
                 << "Cannot find other AddRec IV to help folding\n";);

  LLVM_DEBUG(if (ToFold && ToHelpFold) dbgs()
             << "\nFound loop that can fold terminating condition\n"
             << "  BECount (SCEV): " << *SE.getBackedgeTakenCount(L) << "\n"
             << "  TermCond: " << *TermCond << "\n"
             << "  BranchInst: " << *BI << "\n"
             << "  ToFold: " << *ToFold << "\n"
             << "  ToHelpFold: " << *ToHelpFold << "\n");

  if (!ToFold || !ToHelpFold)
    return std::nullopt;
  return std::make_tuple(ToFold, ToHelpFold, TermValueS, MustDropPoison);
}
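// An illustrative (hypothetical) loop this analysis would accept, assuming the
// loop is guarded so the backedge-taken count %n - 1 is well defined:
//
//   loop:
//     %i      = phi i32 [ 0, %preheader ], [ %i.next, %loop ]
//     %p      = phi ptr [ %base, %preheader ], [ %p.next, %loop ]
//     ...
//     %i.next = add nuw nsw i32 %i, 1
//     %p.next = getelementptr inbounds i32, ptr %p, i64 1
//     %cmp    = icmp slt i32 %i.next, %n
//     br i1 %cmp, label %loop, label %exit
//
// Here ToFold is %i (almost dead once the compare is rewritten) and ToHelpFold
// is %p. The terminating value is the post-increment pointer evaluated at the
// backedge-taken count, roughly %base + 4 * %n, which the caller expands in
// the preheader and compares against %p.next with icmp eq.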
static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                               DominatorTree &DT, LoopInfo &LI,
                               const TargetTransformInfo &TTI,
                               AssumptionCache &AC, TargetLibraryInfo &TLI,
                               MemorySSA *MSSA) {

  // Debug preservation - before we start removing anything identify which DVI
  // meet the salvageable criteria and store their DIExpression and SCEVs.
  SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> SalvageableDVIRecords;
  SmallSet<AssertingVH<DbgValueInst>, 2> DVIHandles;
  DbgGatherSalvagableDVI(L, SE, SalvageableDVIRecords, DVIHandles);

  bool Changed = false;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Run the main LSR transformation.
  const LSRInstance &Reducer =
      LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get());
  Changed |= Reducer.getChanged();

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr", false);
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
    if (numFolded) {
      Changed = true;
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
                                                           MSSAU.get());
      DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
    }
  }
  // LSR may at times remove all uses of an induction variable from a loop.
  // The only remaining use is the PHI in the exit block.
  // When this is the case, if the exit value of the IV can be calculated using
  // SCEV, we can replace the exit block PHI with the final value of the IV and
  // skip the updates in each loop iteration.
  if (L->isRecursivelyLCSSAForm(DT, LI) && L->getExitBlock()) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr", true);
    int Rewrites = rewriteLoopExitValues(L, &LI, &TLI, &SE, &TTI, Rewriter, &DT,
                                         UnusedIndVarInLoop, DeadInsts);
    if (Rewrites) {
      Changed = true;
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
                                                           MSSAU.get());
      DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
    }
  }

  const bool EnableFormTerm = [&] {
    switch (AllowTerminatingConditionFoldingAfterLSR) {
    case cl::BOU_TRUE:
      return true;
    case cl::BOU_FALSE:
      return false;
    case cl::BOU_UNSET:
      return TTI.shouldFoldTerminatingConditionAfterLSR();
    }
    llvm_unreachable("Unhandled cl::boolOrDefault enum");
  }();

  if (EnableFormTerm) {
    if (auto Opt = canFoldTermCondOfLoop(L, SE, DT, LI)) {
      auto [ToFold, ToHelpFold, TermValueS, MustDrop] = *Opt;

      Changed = true;
      NumTermFold++;

      BasicBlock *LoopPreheader = L->getLoopPreheader();
      BasicBlock *LoopLatch = L->getLoopLatch();

      (void)ToFold;
      LLVM_DEBUG(dbgs() << "To fold phi-node:\n"
                        << *ToFold << "\n"
                        << "New term-cond phi-node:\n"
                        << *ToHelpFold << "\n");

      Value *StartValue = ToHelpFold->getIncomingValueForBlock(LoopPreheader);
      (void)StartValue;
      Value *LoopValue = ToHelpFold->getIncomingValueForBlock(LoopLatch);

      // See comment in canFoldTermCondOfLoop on why this is sufficient.
      if (MustDrop)
        cast<Instruction>(LoopValue)->dropPoisonGeneratingFlags();

      // SCEVExpander for both use in preheader and latch
      const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
      SCEVExpander Expander(SE, DL, "lsr_fold_term_cond");
      SCEVExpanderCleaner ExpCleaner(Expander);

      assert(Expander.isSafeToExpand(TermValueS) &&
             "Terminating value was checked safe in canFoldTerminatingCondition");

      // Create new terminating value at loop preheader
      Value *TermValue = Expander.expandCodeFor(TermValueS, ToHelpFold->getType(),
                                                LoopPreheader->getTerminator());

      LLVM_DEBUG(dbgs() << "Start value of new term-cond phi-node:\n"
                        << *StartValue << "\n"
                        << "Terminating value of new term-cond phi-node:\n"
                        << *TermValue << "\n");

      // Create new terminating condition at loop latch
      BranchInst *BI = cast<BranchInst>(LoopLatch->getTerminator());
      ICmpInst *OldTermCond = cast<ICmpInst>(BI->getCondition());
      IRBuilder<> LatchBuilder(LoopLatch->getTerminator());
      Value *NewTermCond =
          LatchBuilder.CreateICmp(CmpInst::ICMP_EQ, LoopValue, TermValue,
                                  "lsr_fold_term_cond.replaced_term_cond");
      // Swap successors to exit loop body if IV equals the new TermValue
      if (BI->getSuccessor(0) == L->getHeader())
        BI->swapSuccessors();

      LLVM_DEBUG(dbgs() << "Old term-cond:\n"
                        << *OldTermCond << "\n"
                        << "New term-cond:\n" << *NewTermCond << "\n");

      BI->setCondition(NewTermCond);

      Expander.clear();
      OldTermCond->eraseFromParent();
      DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());

      ExpCleaner.markResultUsed();
    }
  }

  if (SalvageableDVIRecords.empty())
    return Changed;

  // Obtain relevant IVs and attempt to rewrite the salvageable DVIs with
  // expressions composed using the derived iteration count.
  // TODO: Allow for multiple IV references for nested AddRecSCEVs
  for (const auto &L : LI) {
    if (llvm::PHINode *IV = GetInductionVariable(*L, SE, Reducer))
      DbgRewriteSalvageableDVIs(L, SE, IV, SalvageableDVIRecords);
    else {
      LLVM_DEBUG(dbgs() << "scev-salvage: SCEV salvaging not possible. An IV "
                           "could not be identified.\n");
    }
  }

  for (auto &Rec : SalvageableDVIRecords)
    Rec->clear();
  SalvageableDVIRecords.clear();
  DVIHandles.clear();
  return Changed;
}
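// To make the exit-value rewriting step above concrete (hypothetical example,
// not from a test case): if LSR leaves an IV whose only remaining use is an
// LCSSA PHI in the exit block, for instance a counter whose exit value is
// simply the trip count %n, rewriteLoopExitValues can replace that exit PHI
// with a value computed from the backedge-taken count in the preheader, after
// which the in-loop updates of the counter become dead and are removed by
// RecursivelyDeleteTriviallyDeadInstructionsPermissive / DeleteDeadPHIs.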
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  if (skipLoop(L))
    return false;

  auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *L->getHeader()->getParent());
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      *L->getHeader()->getParent());
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
      *L->getHeader()->getParent());
  auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
  MemorySSA *MSSA = nullptr;
  if (MSSAAnalysis)
    MSSA = &MSSAAnalysis->getMSSA();
  return ReduceLoopStrength(L, IU, SE, DT, LI, TTI, AC, TLI, MSSA);
}
PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
                          AR.DT, AR.LI, AR.TTI, AR.AC, AR.TLI, AR.MSSA))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

char LoopStrengthReduce::ID = 0;

INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                      "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)

Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); }