//===-- LoopPredication.cpp - Guard based loop predication pass ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LoopPredication pass tries to convert loop variant range checks to loop
// invariant by widening checks across loop iterations. For example, it will
// convert
//
//   for (i = 0; i < n; i++) {
//     guard(i < len);
//     ...
//   }
//
// to
//
//   for (i = 0; i < n; i++) {
//     guard(n - 1 < len);
//     ...
//   }
//
// After this transformation the condition of the guard is loop invariant, so
// loop-unswitch can later unswitch the loop by this condition which basically
// predicates the loop by the widened condition:
//
//   if (n - 1 < len)
//     for (i = 0; i < n; i++) {
//       ...
//     }
//   else
//     deoptimize
//
// It's tempting to rely on SCEV here, but it has proven to be problematic.
// Generally the facts SCEV provides about the increment step of add
// recurrences are true if the backedge of the loop is taken, which implicitly
// assumes that the guard doesn't fail. Using these facts to optimize the
// guard results in a circular logic where the guard is optimized under the
// assumption that it never fails.
//
// For example, in the loop below the induction variable will be marked as nuw
// based on the guard. Based on nuw the guard predicate will be considered
// monotonic. Given a monotonic condition it's tempting to replace the induction
// variable in the condition with its value on the last iteration. But this
// transformation is not correct, e.g. e = 4, b = 5 breaks the loop.
//
//   for (int i = b; i != e; i++)
//     guard(i u< len)
//
// One of the ways to reason about this problem is to use an inductive proof
// approach. Given the loop:
//
//   if (B(0)) {
//     do {
//       I = PHI(0, I.INC)
//       I.INC = I + Step
//       guard(G(I));
//     } while (B(I));
//   }
//
// where B(x) and G(x) are predicates that map integers to booleans, we want a
// loop invariant expression M such that the following program has the same
// semantics as the one above:
//
//   if (B(0)) {
//     do {
//       I = PHI(0, I.INC)
//       I.INC = I + Step
//       guard(G(0) && M);
//     } while (B(I));
//   }
//
// One solution for M is M = forall X . (G(X) && B(X)) => G(X + Step)
//
// Informal proof that the transformation above is correct:
//
//   By the definition of guards we can rewrite the guard condition to:
//     G(I) && G(0) && M
//
//   Let's prove that for each iteration of the loop:
//     G(0) && M => G(I)
//   And the condition above can be simplified to G(Start) && M.
//
//   Induction base.
//     G(0) && M => G(0)
//
//   Induction step. Assuming G(0) && M => G(I) on the subsequent
//   iteration:
//
//     B(I) is true because it's the backedge condition.
//     G(I) is true because the backedge is guarded by this condition.
//
//   So M = forall X . (G(X) && B(X)) => G(X + Step) implies G(I + Step).
//
// Note that we can use anything stronger than M, i.e. any condition which
// implies M.
//
// When S = 1 (i.e. forward iterating loop), the transformation is supported
// when:
//   * The loop has a single latch with the condition of the form:
//     B(X) = latchStart + X <pred> latchLimit,
//     where <pred> is u<, u<=, s<, or s<=.
//   * The guard condition is of the form
//     G(X) = guardStart + X u< guardLimit
//
//   For the ult latch comparison case M is:
//     forall X . guardStart + X u< guardLimit && latchStart + X u< latchLimit =>
//       guardStart + X + 1 u< guardLimit
//
//   The only way the antecedent can be true and the consequent can be false is
//   if
//     X == guardLimit - 1 - guardStart
//   (and guardLimit is non-zero, but we won't use this latter fact).
//   If X == guardLimit - 1 - guardStart then the second half of the antecedent is
//     latchStart + guardLimit - 1 - guardStart u< latchLimit
//   and its negation is
//     latchStart + guardLimit - 1 - guardStart u>= latchLimit
//
//   In other words, if
//     latchLimit u<= latchStart + guardLimit - 1 - guardStart
//   then M holds
//   (the ranges below are written in ConstantRange notation, where [A, B) is the
//   set for (I = A; I != B; I++ /*maywrap*/) yield(I);):
//
//      forall X . guardStart + X u< guardLimit &&
//                 latchStart + X u< latchLimit =>
//        guardStart + X + 1 u< guardLimit
//   == forall X . guardStart + X u< guardLimit &&
//                 latchStart + X u< latchStart + guardLimit - 1 - guardStart =>
//        guardStart + X + 1 u< guardLimit
//   == forall X . (guardStart + X) in [0, guardLimit) &&
//                 (latchStart + X) in [0, latchStart + guardLimit - 1 - guardStart) =>
//        (guardStart + X + 1) in [0, guardLimit)
//   == forall X . X in [-guardStart, guardLimit - guardStart) &&
//                 X in [-latchStart, guardLimit - 1 - guardStart) =>
//        X in [-guardStart - 1, guardLimit - guardStart - 1)
//   == true
//
// So the widened condition is:
//   guardStart u< guardLimit &&
//   latchStart + guardLimit - 1 - guardStart u>= latchLimit
// Similarly for ule condition the widened condition is:
//   guardStart u< guardLimit &&
//   latchStart + guardLimit - 1 - guardStart u> latchLimit
// For slt condition the widened condition is:
//   guardStart u< guardLimit &&
//   latchStart + guardLimit - 1 - guardStart s>= latchLimit
// For sle condition the widened condition is:
//   guardStart u< guardLimit &&
//   latchStart + guardLimit - 1 - guardStart s> latchLimit
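//
// As a rough sanity check of the ult case, consider the introductory example
//   for (i = 0; i < n; i++) { guard(i u< len); ... }
// Here the range check gives guardStart = 0 and guardLimit = len, and the
// latch compares the incremented IV, so B(X) = 1 + X u< n, giving
// latchStart = 1 and latchLimit = n. The widened condition
//   guardStart u< guardLimit &&
//   latchStart + guardLimit - 1 - guardStart u>= latchLimit
// becomes
//   0 u< len && len u>= n
// which (for n >= 1) is exactly the loop invariant "n - 1 < len" check shown
// at the top of this file.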
//
// When S = -1 (i.e. reverse iterating loop), the transformation is supported
// when:
//   * The loop has a single latch with the condition of the form:
//     B(X) = X <pred> latchLimit, where <pred> is u>, u>=, s>, or s>=.
//   * The guard condition is of the form
//     G(X) = X - 1 u< guardLimit
//
//   For the ugt latch comparison case M is:
//     forall X. X-1 u< guardLimit and X u> latchLimit => X-2 u< guardLimit
//
//   The only way the antecedent can be true and the consequent can be false is if
//     X == 1.
//   If X == 1 then the second half of the antecedent is
//     1 u> latchLimit, and its negation is latchLimit u>= 1.
//
//   So the widened condition is:
//     guardStart u< guardLimit && latchLimit u>= 1.
//   Similarly for sgt condition the widened condition is:
//     guardStart u< guardLimit && latchLimit s>= 1.
//   For uge condition the widened condition is:
//     guardStart u< guardLimit && latchLimit u> 1.
//   For sge condition the widened condition is:
//     guardStart u< guardLimit && latchLimit s> 1.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

#define DEBUG_TYPE "loop-predication"
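
// Both pass managers expose this pass under the name "loop-predication" (see
// the INITIALIZE_PASS_* macros and LoopPredicationPass below), so it can be
// run in isolation with, for example, `opt -passes=loop-predication`.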
STATISTIC(TotalConsidered, "Number of guards considered");
STATISTIC(TotalWidened, "Number of checks widened");

using namespace llvm;

static cl::opt<bool> EnableIVTruncation("loop-predication-enable-iv-truncation",
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> EnableCountDownLoop("loop-predication-enable-count-down-loop",
                                         cl::Hidden, cl::init(true));

static cl::opt<bool>
    SkipProfitabilityChecks("loop-predication-skip-profitability-checks",
                            cl::Hidden, cl::init(false));

// This is the scale factor for the latch probability. We use this during
// profitability analysis to find other exiting blocks that have a much higher
// probability of exiting the loop than the latch exit does.
// This value should be greater than 1 for a sane profitability check.
static cl::opt<float> LatchExitProbabilityScale(
    "loop-predication-latch-probability-scale", cl::Hidden, cl::init(2.0),
    cl::desc("scale factor for the latch probability. Value should be greater "
             "than 1. Lower values are ignored"));

static cl::opt<bool> PredicateWidenableBranchGuards(
    "loop-predication-predicate-widenable-branches-to-deopt", cl::Hidden,
    cl::desc("Whether or not we should predicate guards "
             "expressed as widenable branches to deoptimize blocks"),
    cl::init(true));
namespace {
/// Represents an induction variable check:
///   icmp Pred, <induction variable>, <loop invariant limit>
struct LoopICmp {
  ICmpInst::Predicate Pred;
  const SCEVAddRecExpr *IV;
  const SCEV *Limit;

  LoopICmp(ICmpInst::Predicate Pred, const SCEVAddRecExpr *IV,
           const SCEV *Limit)
      : Pred(Pred), IV(IV), Limit(Limit) {}
  LoopICmp() {}

  void dump() {
    dbgs() << "LoopICmp Pred = " << Pred << ", IV = " << *IV
           << ", Limit = " << *Limit << "\n";
  }
};
class LoopPredication {
  AliasAnalysis *AA;
  DominatorTree *DT;
  ScalarEvolution *SE;
  LoopInfo *LI;
  BranchProbabilityInfo *BPI;

  Loop *L;
  const DataLayout *DL;
  BasicBlock *Preheader;
  LoopICmp LatchCheck;

  bool isSupportedStep(const SCEV *Step);
  Optional<LoopICmp> parseLoopICmp(ICmpInst *ICI);
  Optional<LoopICmp> parseLoopLatchICmp();

  /// Return an insertion point suitable for inserting a safe to speculate
  /// instruction whose only user will be 'User' which has operands 'Ops'. A
  /// trivial result would be at the User itself, but we try to return a
  /// loop invariant location if possible.
  Instruction *findInsertPt(Instruction *User, ArrayRef<Value *> Ops);
  /// Same as above, *except* that this uses the SCEV definition of invariant
  /// which is that an expression *can be made* invariant via SCEVExpander.
  /// Thus, this version is only suitable for finding an insert point to be
  /// passed to SCEVExpander!
  Instruction *findInsertPt(Instruction *User, ArrayRef<const SCEV *> Ops);

  /// Return true if the value is known to produce a single fixed value across
  /// all iterations on which it executes. Note that this does not imply
  /// speculation safety. That must be established separately.
  bool isLoopInvariantValue(const SCEV *S);

  Value *expandCheck(SCEVExpander &Expander, Instruction *Guard,
                     ICmpInst::Predicate Pred, const SCEV *LHS,
                     const SCEV *RHS);

  Optional<Value *> widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
                                        Instruction *Guard);
  Optional<Value *> widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck,
                                                        LoopICmp RangeCheck,
                                                        SCEVExpander &Expander,
                                                        Instruction *Guard);
  Optional<Value *> widenICmpRangeCheckDecrementingLoop(LoopICmp LatchCheck,
                                                        LoopICmp RangeCheck,
                                                        SCEVExpander &Expander,
                                                        Instruction *Guard);
  unsigned collectChecks(SmallVectorImpl<Value *> &Checks, Value *Condition,
                         SCEVExpander &Expander, Instruction *Guard);
  bool widenGuardConditions(IntrinsicInst *II, SCEVExpander &Expander);
  bool widenWidenableBranchGuardConditions(BranchInst *Guard,
                                           SCEVExpander &Expander);
  // If the loop always exits through another block in the loop, we should not
  // predicate based on the latch check. For example, the latch check can be a
  // very coarse grained check and there can be more fine grained exit checks
  // within the loop. We identify such unprofitable loops through BPI.
  bool isLoopProfitableToPredicate();

  bool predicateLoopExits(Loop *L, SCEVExpander &Rewriter);

public:
  LoopPredication(AliasAnalysis *AA, DominatorTree *DT,
                  ScalarEvolution *SE, LoopInfo *LI,
                  BranchProbabilityInfo *BPI)
      : AA(AA), DT(DT), SE(SE), LI(LI), BPI(BPI) {}
  bool runOnLoop(Loop *L);
};
class LoopPredicationLegacyPass : public LoopPass {
public:
  static char ID;
  LoopPredicationLegacyPass() : LoopPass(ID) {
    initializeLoopPredicationLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    BranchProbabilityInfo &BPI =
        getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    LoopPredication LP(AA, DT, SE, LI, &BPI);
    return LP.runOnLoop(L);
  }
};

} // end anonymous namespace

char LoopPredicationLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopPredicationLegacyPass, "loop-predication",
                      "Loop predication", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(LoopPredicationLegacyPass, "loop-predication",
                    "Loop predication", false, false)

Pass *llvm::createLoopPredicationPass() {
  return new LoopPredicationLegacyPass();
}
PreservedAnalyses LoopPredicationPass::run(Loop &L, LoopAnalysisManager &AM,
                                           LoopStandardAnalysisResults &AR,
                                           LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  // For the new PM, we also can't use BranchProbabilityInfo as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but BPI is not preserved, hence a newly built one is needed.
  BranchProbabilityInfo BPI(*F, AR.LI, &AR.TLI, &AR.DT, nullptr);
  LoopPredication LP(&AR.AA, &AR.DT, &AR.SE, &AR.LI, &BPI);
  if (!LP.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}
Optional<LoopICmp>
LoopPredication::parseLoopICmp(ICmpInst *ICI) {
  auto Pred = ICI->getPredicate();
  auto *LHS = ICI->getOperand(0);
  auto *RHS = ICI->getOperand(1);

  const SCEV *LHSS = SE->getSCEV(LHS);
  if (isa<SCEVCouldNotCompute>(LHSS))
    return None;
  const SCEV *RHSS = SE->getSCEV(RHS);
  if (isa<SCEVCouldNotCompute>(RHSS))
    return None;

  // Canonicalize RHS to be loop invariant bound, LHS - a loop computable IV
  if (SE->isLoopInvariant(LHSS, L)) {
    std::swap(LHS, RHS);
    std::swap(LHSS, RHSS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHSS);
  if (!AR || AR->getLoop() != L)
    return None;

  return LoopICmp(Pred, AR, RHSS);
}
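
// Emits the check Pred(LHS, RHS) for the guard, preferring a loop invariant
// insertion point. If both operands are loop invariant and ScalarEvolution
// already knows the loop entry is guarded by the condition (or by its
// inverse), the check folds to a true/false constant instead.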
Value *LoopPredication::expandCheck(SCEVExpander &Expander,
                                    Instruction *Guard,
                                    ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS) {
  Type *Ty = LHS->getType();
  assert(Ty == RHS->getType() && "expandCheck operands have different types?");

  if (SE->isLoopInvariant(LHS, L) && SE->isLoopInvariant(RHS, L)) {
    IRBuilder<> Builder(Guard);
    if (SE->isLoopEntryGuardedByCond(L, Pred, LHS, RHS))
      return Builder.getTrue();
    if (SE->isLoopEntryGuardedByCond(L, ICmpInst::getInversePredicate(Pred),
                                     LHS, RHS))
      return Builder.getFalse();
  }

  Value *LHSV = Expander.expandCodeFor(LHS, Ty, findInsertPt(Guard, {LHS}));
  Value *RHSV = Expander.expandCodeFor(RHS, Ty, findInsertPt(Guard, {RHS}));
  IRBuilder<> Builder(findInsertPt(Guard, {LHSV, RHSV}));
  return Builder.CreateICmp(Pred, LHSV, RHSV);
}
// Returns true if it's safe to truncate the IV to RangeCheckType.
// When the IV type is wider than the range operand type, we can still do loop
// predication, by generating SCEVs for the range and latch that are of the
// same type. We achieve this by generating a SCEV truncate expression for the
// latch IV. This is done iff truncation of the IV is a safe operation,
// without loss of information.
// Another way to achieve this is by generating a wider type SCEV for the
// range check operand, however, this needs a more involved check that
// operands do not overflow. This can lead to loss of information when the
// range operand is of the form: add i32 %offset, %iv. We need to prove that
// sext(x + y) is same as sext(x) + sext(y).
// This function returns true if we can safely represent the IV type in
// the RangeCheckType without loss of information.
static bool isSafeToTruncateWideIVType(const DataLayout &DL,
                                       ScalarEvolution &SE,
                                       const LoopICmp LatchCheck,
                                       Type *RangeCheckType) {
  if (!EnableIVTruncation)
    return false;
  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedSize() >
             DL.getTypeSizeInBits(RangeCheckType).getFixedSize() &&
         "Expected latch check IV type to be larger than range check operand "
         "type!");
  // The start and end values of the IV should be known. This is to guarantee
  // that truncating the wide type will not lose information.
  auto *Limit = dyn_cast<SCEVConstant>(LatchCheck.Limit);
  auto *Start = dyn_cast<SCEVConstant>(LatchCheck.IV->getStart());
  if (!Limit || !Start)
    return false;
  // This check makes sure that the IV does not change sign during loop
  // iterations. Consider latchType = i64, LatchStart = 5, Pred = ICMP_SGE,
  // LatchEnd = 2, rangeCheckType = i32. If it's not a monotonic predicate, the
  // IV wraps around, and the truncation of the IV would lose the range of
  // iterations between 2^32 and 2^64.
  if (!SE.getMonotonicPredicateType(LatchCheck.IV, LatchCheck.Pred))
    return false;
  // The active bits should be less than the bits in the RangeCheckType. This
  // guarantees that truncating the latch check to RangeCheckType is a safe
  // operation.
  auto RangeCheckTypeBitSize =
      DL.getTypeSizeInBits(RangeCheckType).getFixedSize();
  return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
         Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
}
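
// For instance, a latch IV {0,+,1}<i64> with a constant limit of 1000 can be
// truncated to match an i32 range check: the predicate is monotonic and both
// the start (0) and the limit (1000) have fewer than 32 active bits, so no
// information is lost by the truncation.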
// Return a LoopICmp describing a latch check equivalent to LatchCheck but with
// the requested type if safe to do so. May involve the use of a new IV.
static Optional<LoopICmp> generateLoopLatchCheck(const DataLayout &DL,
                                                 ScalarEvolution &SE,
                                                 const LoopICmp LatchCheck,
                                                 Type *RangeCheckType) {

  auto *LatchType = LatchCheck.IV->getType();
  if (RangeCheckType == LatchType)
    return LatchCheck;
  // For now, bail out if latch type is narrower than range type.
  if (DL.getTypeSizeInBits(LatchType).getFixedSize() <
      DL.getTypeSizeInBits(RangeCheckType).getFixedSize())
    return None;
  if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType))
    return None;
  // We can now safely identify the truncated version of the IV and limit for
  // RangeCheckType.
  LoopICmp NewLatchCheck;
  NewLatchCheck.Pred = LatchCheck.Pred;
  NewLatchCheck.IV = dyn_cast<SCEVAddRecExpr>(
      SE.getTruncateExpr(LatchCheck.IV, RangeCheckType));
  if (!NewLatchCheck.IV)
    return None;
  NewLatchCheck.Limit = SE.getTruncateExpr(LatchCheck.Limit, RangeCheckType);
  LLVM_DEBUG(dbgs() << "IV of type: " << *LatchType
                    << " can be represented as range check type: "
                    << *RangeCheckType << "\n");
  LLVM_DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
  LLVM_DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
  return NewLatchCheck;
}
bool LoopPredication::isSupportedStep(const SCEV *Step) {
  return Step->isOne() || (Step->isAllOnesValue() && EnableCountDownLoop);
}

Instruction *LoopPredication::findInsertPt(Instruction *Use,
                                           ArrayRef<Value *> Ops) {
  for (Value *Op : Ops)
    if (!L->isLoopInvariant(Op))
      return Use;
  return Preheader->getTerminator();
}

Instruction *LoopPredication::findInsertPt(Instruction *Use,
                                           ArrayRef<const SCEV *> Ops) {
  // Subtlety: SCEV considers things to be invariant if the value produced is
  // the same across iterations. This is not the same as being able to
  // evaluate outside the loop, which is what we actually need here.
  for (const SCEV *Op : Ops)
    if (!SE->isLoopInvariant(Op, L) ||
        !isSafeToExpandAt(Op, Preheader->getTerminator(), *SE))
      return Use;
  return Preheader->getTerminator();
}
bool LoopPredication::isLoopInvariantValue(const SCEV *S) {
  // Handling expressions which produce invariant results, but *haven't* yet
  // been removed from the loop serves two important purposes.
  // 1) Most importantly, it resolves a pass ordering cycle which would
  // otherwise need us to iterate licm, loop-predication, and either
  // loop-unswitch or loop-peeling to make progress on examples with lots of
  // predicable range checks in a row. (Since, in the general case, we can't
  // hoist the length checks until the dominating checks have been discharged
  // as we can't prove doing so is safe.)
  // 2) As a nice side effect, this exposes the value of peeling or unswitching
  // much more obviously in the IR. Otherwise, the cost modeling for other
  // transforms would end up needing to duplicate all of this logic to model a
  // check which becomes predictable based on a modeled peel or unswitch.
  //
  // The cost of doing so in the worst case is an extra fill from the stack in
  // the loop to materialize the loop invariant test value instead of checking
  // against the original IV which is presumably in a register inside the loop.
  // Such cases are presumably rare, and hint at missing opportunities for
  // other passes.

  if (SE->isLoopInvariant(S, L))
    // Note: This is the SCEV variant, so the original Value* may be within the
    // loop even though SCEV has proven it is loop invariant.
    return true;

  // Handle a particularly important case which SCEV doesn't yet know about
  // which shows up in range checks on arrays with immutable lengths.
  // TODO: This should be sunk inside SCEV.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
    if (const auto *LI = dyn_cast<LoadInst>(U->getValue()))
      if (LI->isUnordered() && L->hasLoopInvariantOperands(LI))
        if (AA->pointsToConstantMemory(LI->getOperand(0)) ||
            LI->hasMetadata(LLVMContext::MD_invariant_load))
          return true;

  return false;
}
<Value
*> LoopPredication::widenICmpRangeCheckIncrementingLoop(
564 LoopICmp LatchCheck
, LoopICmp RangeCheck
,
565 SCEVExpander
&Expander
, Instruction
*Guard
) {
566 auto *Ty
= RangeCheck
.IV
->getType();
567 // Generate the widened condition for the forward loop:
568 // guardStart u< guardLimit &&
569 // latchLimit <pred> guardLimit - 1 - guardStart + latchStart
570 // where <pred> depends on the latch condition predicate. See the file
571 // header comment for the reasoning.
572 // guardLimit - guardStart + latchStart - 1
573 const SCEV
*GuardStart
= RangeCheck
.IV
->getStart();
574 const SCEV
*GuardLimit
= RangeCheck
.Limit
;
575 const SCEV
*LatchStart
= LatchCheck
.IV
->getStart();
576 const SCEV
*LatchLimit
= LatchCheck
.Limit
;
577 // Subtlety: We need all the values to be *invariant* across all iterations,
578 // but we only need to check expansion safety for those which *aren't*
579 // already guaranteed to dominate the guard.
580 if (!isLoopInvariantValue(GuardStart
) ||
581 !isLoopInvariantValue(GuardLimit
) ||
582 !isLoopInvariantValue(LatchStart
) ||
583 !isLoopInvariantValue(LatchLimit
)) {
584 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
587 if (!isSafeToExpandAt(LatchStart
, Guard
, *SE
) ||
588 !isSafeToExpandAt(LatchLimit
, Guard
, *SE
)) {
589 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
593 // guardLimit - guardStart + latchStart - 1
595 SE
->getAddExpr(SE
->getMinusSCEV(GuardLimit
, GuardStart
),
596 SE
->getMinusSCEV(LatchStart
, SE
->getOne(Ty
)));
597 auto LimitCheckPred
=
598 ICmpInst::getFlippedStrictnessPredicate(LatchCheck
.Pred
);
600 LLVM_DEBUG(dbgs() << "LHS: " << *LatchLimit
<< "\n");
601 LLVM_DEBUG(dbgs() << "RHS: " << *RHS
<< "\n");
602 LLVM_DEBUG(dbgs() << "Pred: " << LimitCheckPred
<< "\n");
605 expandCheck(Expander
, Guard
, LimitCheckPred
, LatchLimit
, RHS
);
606 auto *FirstIterationCheck
= expandCheck(Expander
, Guard
, RangeCheck
.Pred
,
607 GuardStart
, GuardLimit
);
608 IRBuilder
<> Builder(findInsertPt(Guard
, {FirstIterationCheck
, LimitCheck
}));
609 return Builder
.CreateAnd(FirstIterationCheck
, LimitCheck
);
Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
    LoopICmp LatchCheck, LoopICmp RangeCheck,
    SCEVExpander &Expander, Instruction *Guard) {
  auto *Ty = RangeCheck.IV->getType();
  const SCEV *GuardStart = RangeCheck.IV->getStart();
  const SCEV *GuardLimit = RangeCheck.Limit;
  const SCEV *LatchStart = LatchCheck.IV->getStart();
  const SCEV *LatchLimit = LatchCheck.Limit;
  // Subtlety: We need all the values to be *invariant* across all iterations,
  // but we only need to check expansion safety for those which *aren't*
  // already guaranteed to dominate the guard.
  if (!isLoopInvariantValue(GuardStart) ||
      !isLoopInvariantValue(GuardLimit) ||
      !isLoopInvariantValue(LatchStart) ||
      !isLoopInvariantValue(LatchLimit)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }
  if (!isSafeToExpandAt(LatchStart, Guard, *SE) ||
      !isSafeToExpandAt(LatchLimit, Guard, *SE)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }
  // The decrement of the latch check IV should be the same as the
  // rangeCheckIV.
  auto *PostDecLatchCheckIV = LatchCheck.IV->getPostIncExpr(*SE);
  if (RangeCheck.IV != PostDecLatchCheckIV) {
    LLVM_DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: "
                      << *PostDecLatchCheckIV
                      << " and RangeCheckIV: " << *RangeCheck.IV << "\n");
    return None;
  }

  // Generate the widened condition for CountDownLoop:
  //   guardStart u< guardLimit &&
  //   latchLimit <pred> 1.
  // See the header comment for reasoning of the checks.
  auto LimitCheckPred =
      ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);
  auto *FirstIterationCheck = expandCheck(Expander, Guard,
                                          ICmpInst::ICMP_ULT,
                                          GuardStart, GuardLimit);
  auto *LimitCheck = expandCheck(Expander, Guard, LimitCheckPred, LatchLimit,
                                 SE->getOne(Ty));
  IRBuilder<> Builder(findInsertPt(Guard, {FirstIterationCheck, LimitCheck}));
  return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
}
static void normalizePredicate(ScalarEvolution *SE, Loop *L,
                               LoopICmp &RC) {
  // LFTR canonicalizes checks to the ICMP_NE/EQ form; normalize back to the
  // ULT/UGE form for ease of handling by our caller.
  if (ICmpInst::isEquality(RC.Pred) &&
      RC.IV->getStepRecurrence(*SE)->isOne() &&
      SE->isKnownPredicate(ICmpInst::ICMP_ULE, RC.IV->getStart(), RC.Limit))
    RC.Pred = RC.Pred == ICmpInst::ICMP_NE ?
      ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
}
/// If ICI can be widened to a loop invariant condition emits the loop
/// invariant condition in the loop preheader and return it, otherwise
/// returns None.
Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
                                                       SCEVExpander &Expander,
                                                       Instruction *Guard) {
  LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
  LLVM_DEBUG(ICI->dump());

  // parseLoopStructure guarantees that the latch condition is:
  //   ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
  // We are looking for the range checks of the form:
  //   i u< guardLimit
  auto RangeCheck = parseLoopICmp(ICI);
  if (!RangeCheck) {
    LLVM_DEBUG(dbgs() << "Failed to parse the range check condition!\n");
    return None;
  }
  LLVM_DEBUG(dbgs() << "Guard check:\n");
  LLVM_DEBUG(RangeCheck->dump());
  if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
    LLVM_DEBUG(dbgs() << "Unsupported range check predicate("
                      << RangeCheck->Pred << ")!\n");
    return None;
  }
  auto *RangeCheckIV = RangeCheck->IV;
  if (!RangeCheckIV->isAffine()) {
    LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n");
    return None;
  }
  auto *Step = RangeCheckIV->getStepRecurrence(*SE);
  // We cannot just compare with latch IV step because the latch and range IVs
  // may have different types.
  if (!isSupportedStep(Step)) {
    LLVM_DEBUG(dbgs() << "Range check and latch have IVs with different steps!\n");
    return None;
  }
  auto *Ty = RangeCheckIV->getType();
  auto CurrLatchCheckOpt = generateLoopLatchCheck(*DL, *SE, LatchCheck, Ty);
  if (!CurrLatchCheckOpt) {
    LLVM_DEBUG(dbgs() << "Failed to generate a loop latch check "
                         "corresponding to range type: "
                      << *Ty << "\n");
    return None;
  }

  LoopICmp CurrLatchCheck = *CurrLatchCheckOpt;
  // At this point, the range and latch step should have the same type, but need
  // not have the same value (we support both 1 and -1 steps).
  assert(Step->getType() ==
             CurrLatchCheck.IV->getStepRecurrence(*SE)->getType() &&
         "Range and latch steps should be of same type!");
  if (Step != CurrLatchCheck.IV->getStepRecurrence(*SE)) {
    LLVM_DEBUG(dbgs() << "Range and latch have different step values!\n");
    return None;
  }

  if (Step->isOne())
    return widenICmpRangeCheckIncrementingLoop(CurrLatchCheck, *RangeCheck,
                                               Expander, Guard);
  else {
    assert(Step->isAllOnesValue() && "Step should be -1!");
    return widenICmpRangeCheckDecrementingLoop(CurrLatchCheck, *RangeCheck,
                                               Expander, Guard);
  }
}
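
// Walk the guard condition as a tree of ANDs, attempting to widen each icmp
// leaf via widenICmpRangeCheck. The (possibly widened) subconditions are
// collected into Checks, along with at most one widenable-condition call, and
// the number of widened checks is returned.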
unsigned LoopPredication::collectChecks(SmallVectorImpl<Value *> &Checks,
                                        Value *Condition,
                                        SCEVExpander &Expander,
                                        Instruction *Guard) {
  unsigned NumWidened = 0;
  // The guard condition is expected to be in form of:
  //   cond1 && cond2 && cond3 ...
  // Iterate over subconditions looking for icmp conditions which can be
  // widened across loop iterations. While widening these conditions, remember
  // the resulting list of subconditions in the Checks vector.
  SmallVector<Value *, 4> Worklist(1, Condition);
  SmallPtrSet<Value *, 4> Visited;
  Value *WideableCond = nullptr;
  do {
    Value *Condition = Worklist.pop_back_val();
    if (!Visited.insert(Condition).second)
      continue;

    Value *LHS, *RHS;
    using namespace llvm::PatternMatch;
    if (match(Condition, m_And(m_Value(LHS), m_Value(RHS)))) {
      Worklist.push_back(LHS);
      Worklist.push_back(RHS);
      continue;
    }

    if (match(Condition,
              m_Intrinsic<Intrinsic::experimental_widenable_condition>())) {
      // Pick any, we don't care which
      WideableCond = Condition;
      continue;
    }

    if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) {
      if (auto NewRangeCheck = widenICmpRangeCheck(ICI, Expander,
                                                   Guard)) {
        Checks.push_back(NewRangeCheck.getValue());
        NumWidened++;
        continue;
      }
    }

    // Save the condition as is if we can't widen it
    Checks.push_back(Condition);
  } while (!Worklist.empty());

  // At the moment, our matching logic for wideable conditions implicitly
  // assumes we preserve the form: (br (and Cond, WC())). FIXME
  // Note that if there were multiple calls to wideable condition in the
  // traversal, we only need to keep one, and which one is arbitrary.
  if (WideableCond)
    Checks.push_back(WideableCond);
  return NumWidened;
}
bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
                                           SCEVExpander &Expander) {
  LLVM_DEBUG(dbgs() << "Processing guard:\n");
  LLVM_DEBUG(Guard->dump());

  TotalConsidered++;
  SmallVector<Value *, 4> Checks;
  unsigned NumWidened = collectChecks(Checks, Guard->getOperand(0), Expander,
                                      Guard);
  if (NumWidened == 0)
    return false;

  TotalWidened += NumWidened;

  // Emit the new guard condition
  IRBuilder<> Builder(findInsertPt(Guard, Checks));
  Value *AllChecks = Builder.CreateAnd(Checks);
  auto *OldCond = Guard->getOperand(0);
  Guard->setOperand(0, AllChecks);
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);

  LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
  return true;
}
bool LoopPredication::widenWidenableBranchGuardConditions(
    BranchInst *BI, SCEVExpander &Expander) {
  assert(isGuardAsWidenableBranch(BI) && "Must be!");
  LLVM_DEBUG(dbgs() << "Processing guard:\n");
  LLVM_DEBUG(BI->dump());

  TotalConsidered++;
  SmallVector<Value *, 4> Checks;
  unsigned NumWidened = collectChecks(Checks, BI->getCondition(),
                                      Expander, BI);
  if (NumWidened == 0)
    return false;

  TotalWidened += NumWidened;

  // Emit the new guard condition
  IRBuilder<> Builder(findInsertPt(BI, Checks));
  Value *AllChecks = Builder.CreateAnd(Checks);
  auto *OldCond = BI->getCondition();
  BI->setCondition(AllChecks);
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  assert(isGuardAsWidenableBranch(BI) &&
         "Stopped being a guard after transform?");

  LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
  return true;
}
Optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
  using namespace PatternMatch;

  BasicBlock *LoopLatch = L->getLoopLatch();
  if (!LoopLatch) {
    LLVM_DEBUG(dbgs() << "The loop doesn't have a single latch!\n");
    return None;
  }

  auto *BI = dyn_cast<BranchInst>(LoopLatch->getTerminator());
  if (!BI || !BI->isConditional()) {
    LLVM_DEBUG(dbgs() << "Failed to match the latch terminator!\n");
    return None;
  }
  BasicBlock *TrueDest = BI->getSuccessor(0);
  assert(
      (TrueDest == L->getHeader() || BI->getSuccessor(1) == L->getHeader()) &&
      "One of the latch's destinations must be the header");

  auto *ICI = dyn_cast<ICmpInst>(BI->getCondition());
  if (!ICI) {
    LLVM_DEBUG(dbgs() << "Failed to match the latch condition!\n");
    return None;
  }
  auto Result = parseLoopICmp(ICI);
  if (!Result) {
    LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
    return None;
  }

  if (TrueDest != L->getHeader())
    Result->Pred = ICmpInst::getInversePredicate(Result->Pred);

  // Check affine first, so if it's not we don't try to compute the step
  // recurrence.
  if (!Result->IV->isAffine()) {
    LLVM_DEBUG(dbgs() << "The induction variable is not affine!\n");
    return None;
  }

  auto *Step = Result->IV->getStepRecurrence(*SE);
  if (!isSupportedStep(Step)) {
    LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
    return None;
  }

  auto IsUnsupportedPredicate = [](const SCEV *Step, ICmpInst::Predicate Pred) {
    if (Step->isOne()) {
      return Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_SLT &&
             Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_SLE;
    } else {
      assert(Step->isAllOnesValue() && "Step should be -1!");
      return Pred != ICmpInst::ICMP_UGT && Pred != ICmpInst::ICMP_SGT &&
             Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_SGE;
    }
  };

  normalizePredicate(SE, L, *Result);
  if (IsUnsupportedPredicate(Step, Result->Pred)) {
    LLVM_DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
                      << ")!\n");
    return None;
  }

  return Result;
}
bool LoopPredication::isLoopProfitableToPredicate() {
  if (SkipProfitabilityChecks || !BPI)
    return true;

  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 8> ExitEdges;
  L->getExitEdges(ExitEdges);
  // If there is only one exiting edge in the loop, it is always profitable to
  // predicate the loop.
  if (ExitEdges.size() == 1)
    return true;

  // Calculate the exiting probabilities of all exiting edges from the loop,
  // starting with the LatchExitProbability.
  // Heuristic for profitability: If any of the exiting blocks' probability of
  // exiting the loop is larger than the probability of exiting through the
  // latch block, it's not profitable to predicate the loop.
  auto *LatchBlock = L->getLoopLatch();
  assert(LatchBlock && "Should have a single latch at this point!");
  auto *LatchTerm = LatchBlock->getTerminator();
  assert(LatchTerm->getNumSuccessors() == 2 &&
         "expected to be an exiting block with 2 succs!");
  unsigned LatchBrExitIdx =
      LatchTerm->getSuccessor(0) == L->getHeader() ? 1 : 0;
  BranchProbability LatchExitProbability =
      BPI->getEdgeProbability(LatchBlock, LatchBrExitIdx);

  // Protect against degenerate inputs provided by the user. Providing a value
  // less than one can invert the definition of profitable loop predication.
  float ScaleFactor = LatchExitProbabilityScale;
  if (ScaleFactor < 1) {
    LLVM_DEBUG(
        dbgs()
        << "Ignored user setting for loop-predication-latch-probability-scale: "
        << LatchExitProbabilityScale << "\n");
    LLVM_DEBUG(dbgs() << "The value is set to 1.0\n");
    ScaleFactor = 1.0;
  }
  const auto LatchProbabilityThreshold =
      LatchExitProbability * ScaleFactor;

  for (const auto &ExitEdge : ExitEdges) {
    BranchProbability ExitingBlockProbability =
        BPI->getEdgeProbability(ExitEdge.first, ExitEdge.second);
    // Some exiting edge has higher probability than the latch exiting edge.
    // No longer profitable to predicate.
    if (ExitingBlockProbability > LatchProbabilityThreshold)
      return false;
  }

  // Using BPI, we have concluded that the most probable way to exit from the
  // loop is through the latch (or there's no profile information and all
  // exits are equally likely).
  return true;
}
/// If we can (cheaply) find a widenable branch which controls entry into the
/// loop, return it.
static BranchInst *FindWidenableTerminatorAboveLoop(Loop *L, LoopInfo &LI) {
  // Walk back through any unconditional executed blocks and see if we can find
  // a widenable condition which seems to control execution of this loop. Note
  // that we predict that maythrow calls are likely untaken and thus that it's
  // profitable to widen a branch before a maythrow call with a condition
  // afterwards even though that may cause the slow path to run in a case where
  // it wouldn't have otherwise.
  BasicBlock *BB = L->getLoopPreheader();
  if (!BB)
    return nullptr;
  do {
    if (BasicBlock *Pred = BB->getSinglePredecessor())
      if (BB == Pred->getSingleSuccessor()) {
        BB = Pred;
        continue;
      }
    break;
  } while (true);

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *Term = Pred->getTerminator();

    Value *Cond, *WC;
    BasicBlock *IfTrueBB, *IfFalseBB;
    if (parseWidenableBranch(Term, Cond, WC, IfTrueBB, IfFalseBB) &&
        IfTrueBB == BB)
      return cast<BranchInst>(Term);
  }
  return nullptr;
}
/// Return the minimum of all analyzeable exit counts. This is an upper bound
/// on the actual exit count. If there are not at least two analyzeable exits,
/// returns SCEVCouldNotCompute.
static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
                                                       DominatorTree &DT,
                                                       Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = SE.getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      continue;
    assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
           "We should only have known counts for exiting blocks that "
           "dominate latch!");
    ExitCounts.push_back(ExitCount);
  }
  if (ExitCounts.size() < 2)
    return SE.getCouldNotCompute();
  return SE.getUMinFromMismatchedTypes(ExitCounts);
}
/// This implements an analogous, but entirely distinct transform from the main
/// loop predication transform. This one is phrased in terms of using a
/// widenable branch *outside* the loop to allow us to simplify loop exits in a
/// following loop. This is close in spirit to the IndVarSimplify transform
/// of the same name, but is materially different: widening loosens legality
/// constraints.
bool LoopPredication::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) {
  // The transformation performed here aims to widen a widenable condition
  // above the loop such that all analyzeable exits leading to deopt are dead.
  // It assumes that the latch is the dominant exit for profitability and that
  // exits branching to deoptimizing blocks are rarely taken. It relies on the
  // semantics of widenable expressions for legality. (i.e. being able to fall
  // down the widenable path spuriously allows us to ignore exit order,
  // unanalyzeable exits, side effects, exceptional exits, and other challenges
  // which restrict the applicability of the non-WC based version of this
  // transform in IndVarSimplify.)
  //
  // NOTE ON POISON/UNDEF - We're hoisting an expression above guards which may
  // imply flags on the expression being hoisted and inserting new uses (flags
  // are only correct for current uses). The result is that we may be
  // inserting a branch on the value which can be either poison or undef. In
  // this case, the branch can legally go either way; we just need to avoid
  // introducing UB. This is achieved through the use of the freeze
  // instruction.

  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  if (ExitingBlocks.empty())
    return false; // Nothing to do.

  auto *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  auto *WidenableBR = FindWidenableTerminatorAboveLoop(L, *LI);
  if (!WidenableBR)
    return false;

  const SCEV *LatchEC = SE->getExitCount(L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchEC))
    return false; // profitability - want hot exit in analyzeable set
  // At this point, we have found an analyzeable latch, and a widenable
  // condition above the loop. If we have a widenable exit within the loop
  // (for which we can't compute exit counts), drop the ability to further
  // widen so that we gain the ability to analyze its exit count and perform
  // this transform. TODO: It'd be nice to know for sure the exit became
  // analyzeable after dropping widenability.
  bool Invalidate = false;

  for (auto *ExitingBB : ExitingBlocks) {
    if (LI->getLoopFor(ExitingBB) != L)
      continue;

    auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      continue;

    Use *Cond, *WC;
    BasicBlock *IfTrueBB, *IfFalseBB;
    if (parseWidenableBranch(BI, Cond, WC, IfTrueBB, IfFalseBB) &&
        L->contains(IfTrueBB)) {
      WC->set(ConstantInt::getTrue(IfTrueBB->getContext()));
      Invalidate = true;
    }
  }
  if (Invalidate)
    SE->forgetLoop(L);
  // The use of umin(all analyzeable exits) instead of latch is subtle, but
  // important for profitability. We may have a loop which hasn't been fully
  // canonicalized just yet. If the exit we chose to widen is provably never
  // taken, we want the widened form to *also* be provably never taken. We
  // can't guarantee this as a current unanalyzeable exit may later become
  // analyzeable, but we can at least avoid the obvious cases.
  const SCEV *MinEC = getMinAnalyzeableBackedgeTakenCount(*SE, *DT, L);
  if (isa<SCEVCouldNotCompute>(MinEC) || MinEC->getType()->isPointerTy() ||
      !SE->isLoopInvariant(MinEC, L) ||
      !isSafeToExpandAt(MinEC, WidenableBR, *SE))
    return false;

  // Subtlety: We need to avoid inserting additional uses of the WC. We know
  // that it can only have one transitive use at the moment, and thus moving
  // that use to just before the branch and inserting code before it and then
  // modifying the operand is legal.
  auto *IP = cast<Instruction>(WidenableBR->getCondition());
  IP->moveBefore(WidenableBR);
  Rewriter.setInsertPoint(IP);
  IRBuilder<> B(IP);
  bool Changed = false;
  Value *MinECV = nullptr; // lazily generated if needed
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    // If our exiting block exits multiple loops, we can only rewrite the
    // innermost one. Otherwise, we're changing how many times the innermost
    // loop runs before it exits.
    if (LI->getLoopFor(ExitingBB) != L)
      continue;

    // Can't rewrite non-branch yet.
    auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      continue;

    // If already constant, nothing to do.
    if (isa<Constant>(BI->getCondition()))
      continue;

    const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount) ||
        ExitCount->getType()->isPointerTy() ||
        !isSafeToExpandAt(ExitCount, WidenableBR, *SE))
      continue;

    const bool ExitIfTrue = !L->contains(*succ_begin(ExitingBB));
    BasicBlock *ExitBB = BI->getSuccessor(ExitIfTrue ? 0 : 1);
    if (!ExitBB->getPostdominatingDeoptimizeCall())
      continue;

    /// Here we can be fairly sure that executing this exit will most likely
    /// lead to executing llvm.experimental.deoptimize.
    /// This is a profitability heuristic, not a legality constraint.

    // If we found a widenable exit condition, do two things:
    // 1) fold the widened exit test into the widenable condition
    // 2) fold the branch to untaken - avoids infinite looping

    Value *ECV = Rewriter.expandCodeFor(ExitCount);
    if (!MinECV)
      MinECV = Rewriter.expandCodeFor(MinEC);
    Value *RHS = MinECV;
    if (ECV->getType() != RHS->getType()) {
      Type *WiderTy = SE->getWiderType(ECV->getType(), RHS->getType());
      ECV = B.CreateZExt(ECV, WiderTy);
      RHS = B.CreateZExt(RHS, WiderTy);
    }
    assert(!Latch || DT->dominates(ExitingBB, Latch));
    Value *NewCond = B.CreateICmp(ICmpInst::ICMP_UGT, ECV, RHS);
    // Freeze poison or undef to an arbitrary bit pattern to ensure we can
    // branch without introducing UB. See NOTE ON POISON/UNDEF above for
    // context.
    NewCond = B.CreateFreeze(NewCond);

    widenWidenableBranch(WidenableBR, NewCond);

    Value *OldCond = BI->getCondition();
    BI->setCondition(ConstantInt::get(OldCond->getType(), !ExitIfTrue));
    Changed = true;
  }

  if (!Changed)
    return false;

  // We just mutated a bunch of loop exits changing their exit counts
  // widely. We need to force recomputation of the exit counts given these
  // changes. Note that all of the inserted exits are never taken, and
  // should be removed next time the CFG is modified.
  SE->forgetLoop(L);
  return true;
}
bool LoopPredication::runOnLoop(Loop *Loop) {
  L = Loop;

  LLVM_DEBUG(dbgs() << "Analyzing ");
  LLVM_DEBUG(L->dump());

  Module *M = L->getHeader()->getModule();

  // There is nothing to do if the module doesn't use guards
  auto *GuardDecl =
      M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
  bool HasIntrinsicGuards = GuardDecl && !GuardDecl->use_empty();
  auto *WCDecl = M->getFunction(
      Intrinsic::getName(Intrinsic::experimental_widenable_condition));
  bool HasWidenableConditions =
      PredicateWidenableBranchGuards && WCDecl && !WCDecl->use_empty();
  if (!HasIntrinsicGuards && !HasWidenableConditions)
    return false;

  DL = &M->getDataLayout();

  Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  auto LatchCheckOpt = parseLoopLatchICmp();
  if (!LatchCheckOpt)
    return false;
  LatchCheck = *LatchCheckOpt;

  LLVM_DEBUG(dbgs() << "Latch check:\n");
  LLVM_DEBUG(LatchCheck.dump());

  if (!isLoopProfitableToPredicate()) {
    LLVM_DEBUG(dbgs() << "Loop not profitable to predicate!\n");
    return false;
  }
  // Collect all the guards into a vector and process later, so as not
  // to invalidate the instruction iterator.
  SmallVector<IntrinsicInst *, 4> Guards;
  SmallVector<BranchInst *, 4> GuardsAsWidenableBranches;
  for (const auto BB : L->blocks()) {
    for (auto &I : *BB)
      if (isGuard(&I))
        Guards.push_back(cast<IntrinsicInst>(&I));
    if (PredicateWidenableBranchGuards &&
        isGuardAsWidenableBranch(BB->getTerminator()))
      GuardsAsWidenableBranches.push_back(
          cast<BranchInst>(BB->getTerminator()));
  }

  SCEVExpander Expander(*SE, *DL, "loop-predication");
  bool Changed = false;
  for (auto *Guard : Guards)
    Changed |= widenGuardConditions(Guard, Expander);
  for (auto *Guard : GuardsAsWidenableBranches)
    Changed |= widenWidenableBranchGuardConditions(Guard, Expander);
  Changed |= predicateLoopExits(L, Expander);
  return Changed;
}