//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
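//
// For example (illustrative only), because SCEVs are uniqued, two
// structurally identical expressions fold to the same object:
//
//   const SCEV *A = SE.getAddExpr(X, Y);
//   const SCEV *B = SE.getAddExpr(Y, X); // canonicalized to the same form
//   assert(A == B && "uniqued SCEVs compare equal by pointer");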
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
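//
// For instance, the canonical induction variable of a loop such as
//
//   for (int i = 0; i != n; ++i) { ... }
//
// is represented as the add recurrence {0,+,1}<%loop>, while a PHI whose
// evolution we cannot model is wrapped in a SCEVUnknown node instead.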
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumExitCountsComputed,
          "Number of loop exits with predictable exit counts");
STATISTIC(NumExitCountsNotComputed,
          "Number of loop exits without predictable exit counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif
static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

static cl::opt<bool, true> VerifySCEVOpt(
    "verify-scev", cl::Hidden, cl::location(VerifySCEV),
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<unsigned> RangeIterThreshold(
    "scev-range-iter-threshold", cl::Hidden,
    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),
    cl::init(32));

static cl::opt<unsigned> MaxLoopGuardCollectionDepth(
    "scalar-evolution-max-loop-guard-collection-depth", cl::Hidden,
    cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1));

static cl::opt<bool> ClassifyExpressions(
    "scalar-evolution-classify-expressions", cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

static cl::opt<unsigned> MaxPhiSCCAnalysisSize(
    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),
    cl::init(8));

static cl::opt<bool>
    EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden,
                            cl::desc("Handle <= and >= in finite loops"),
                            cl::init(true));

static cl::opt<bool> UseContextForNoWrapFlagInference(
    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
    cl::init(true));
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scVScale:
    OS << "vscale";
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    case scSequentialUMinExpr:
      OpStr = " umin_seq ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown:
    cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scVScale:
    return cast<SCEVVScale>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
ArrayRef<const SCEV *> SCEV::operands() const {
  switch (getSCEVType()) {
  case scConstant:
  case scVScale:
  case scUnknown:
    return {};
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->operands();
  case scAddRecExpr:
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr:
    return cast<SCEVNAryExpr>(this)->operands();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->operands();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}
const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
const SCEV *ScalarEvolution::getVScale(Type *Ty) {
  FoldingSetNodeID ID;
  ID.AddInteger(scVScale);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getElementCount(Type *Ty, ElementCount EC) {
  const SCEV *Res = getConstant(Ty, EC.getKnownMinValue());
  if (EC.isScalable())
    Res = getMulExpr(Res, getVScale(Ty));
  return Res;
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}
//===----------------------------------------------------------------------===//
//                           SCEV Utilities
//===----------------------------------------------------------------------===//
/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV,
                                  Value *RV, unsigned Depth) {
  if (Depth > MaxValueCompareDepth)
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(LNumOps)) {
      int Result = CompareValueComplexity(LI, LInst->getOperand(Idx),
                                          RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  return 0;
}
// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// packed efficiently.
// If the max analysis depth was reached, return std::nullopt, assuming we do
// not know if they are equivalent for sure.
static std::optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return std::nullopt;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X =
        CompareValueComplexity(LI, LU->getValue(), RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scVScale: {
    const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
    const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
    return LTy->getBitWidth() - RTy->getBitWidth();
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      assert(DT.dominates(RHead, LHead) &&
             "No dominance between recurrences used by one SCEV?");
      return -1;
    }
    [[fallthrough]];
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    ArrayRef<const SCEV *> LOps = LHS->operands();
    ArrayRef<const SCEV *> ROps = RHS->operands();

    // Lexicographically compare n-ary-like expressions.
    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, LI, LOps[i], ROps[i], DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity = CompareSCEVComplexity(EqCacheSCEV, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}
/// Performs a number of common optimizations on the passed \p Ops. If the
/// whole expression reduces down to a single operand, it will be returned.
///
/// The following optimizations are performed:
///  * Fold constants using the \p Fold function.
///  * Remove identity constants satisfying \p IsIdentity.
///  * If a constant satisfies \p IsAbsorber, return it.
///  * Sort operands by complexity.
template <typename FoldT, typename IsIdentityT, typename IsAbsorberT>
static const SCEV *
constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT,
                        SmallVectorImpl<const SCEV *> &Ops, FoldT Fold,
                        IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
  const SCEVConstant *Folded = nullptr;
  for (unsigned Idx = 0; Idx < Ops.size();) {
    const SCEV *Op = Ops[Idx];
    if (const auto *C = dyn_cast<SCEVConstant>(Op)) {
      if (!Folded)
        Folded = C;
      else
        Folded = cast<SCEVConstant>(
            SE.getConstant(Fold(Folded->getAPInt(), C->getAPInt())));
      Ops.erase(Ops.begin() + Idx);
      continue;
    }
    ++Idx;
  }

  if (Ops.empty()) {
    assert(Folded && "Must have folded value");
    return Folded;
  }

  if (Folded && IsAbsorber(Folded->getAPInt()))
    return Folded;

  GroupByComplexity(Ops, &LI, DT);
  if (Folded && !IsIdentity(Folded->getAPInt()))
    Ops.insert(Ops.begin(), Folded);

  return Ops.size() == 1 ? Ops[0] : nullptr;
}
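
// Illustrative usage sketch (not part of the upstream API surface): an
// add-expression folder could instantiate the helper roughly like
//
//   const SCEV *Folded = constantFoldAndGroupOps(
//       SE, LI, DT, Ops,
//       [](const APInt &C1, const APInt &C2) { return C1 + C2; }, // Fold
//       [](const APInt &C) { return C.isZero(); },  // 0 is the add identity
//       [](const APInt &C) { return false; });      // addition has no absorber
//
// whereas a multiply folder would fold with C1 * C2, use 1 as the identity,
// and use 0 as the absorbing constant.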
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
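  //
  // A concrete sanity check of the 2^T trick (example added here for
  // illustration): for K = 4, K! = 24 = 2^3 * 3, so T = 3 and K!/2^T = 3.
  // BC(It, 4) is then computed as (It*(It-1)*(It-2)*(It-3)) >> 3 at width
  // W + 3, truncated to W bits and multiplied by the modular multiplicative
  // inverse of 3, which exists precisely because 3 is odd.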
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    unsigned TwoFactors = countr_zero(i);
    T += TwoFactors;
    OddFactorial *= (i >> TwoFactors);
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt MultiplyFactor = OddFactorial.multiplicativeInverse();

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
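///
/// For example (illustration only): the add recurrence {0,+,1,+,1} evaluates
/// at iteration It to 0*1 + 1*It + 1*It*(It-1)/2, i.e. the values
/// 0, 1, 3, 6, 10, ... for It = 0, 1, 2, 3, 4.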
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(operands(), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operands during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.
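  //
  // For example (illustration only), given a pointer-typed expression such as
  // (%base + 16), the rewriter below produces (ptrtoint %base) + 16, so the
  // only ptrtoint cast that remains wraps the irreducible SCEVUnknown %base.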
  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}
const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
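  //
  // For example (illustration only): truncating zext(a) + b back to the
  // original width of `a` yields a + trunc(b); the truncate of zext(a) merely
  // replaces that cast, so only trunc(b) counts toward the limit above and
  // the distribution is allowed to proceed.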
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during recursion and different modification ID was inserted
    // into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = getMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
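//
// For example (illustration only): with an 8-bit recurrence and a step known
// to lie in [1, 4], the limit below is SINT_MIN - 4 == 124 with predicate
// s<, since any value s< 124 can be incremented by at most 4 without
// exceeding SINT_MAX == 127.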
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}
// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
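
// Illustrative note (not upstream code): generic callers below select the
// right extension through the traits, e.g.
//
//   auto GetExtendExpr = ExtendOpTraits<SCEVZeroExtendExpr>::GetExtendExpr;
//   const SCEV *Ext = (SE->*GetExtendExpr)(S, WideTy, Depth);
//
// dispatches to ScalarEvolution::getZeroExtendExpr and pairs it with the
// matching SCEV::FlagNUW wrap flag, so one template body handles both the
// signed and the unsigned cases.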
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list. Note, that
  // SA might have repeated ops, like %a + %a + ..., so only remove one.
  SmallVector<const SCEV *, 4> DiffOps(SA->operands());
  for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
    if (*It == Step) {
      DiffOps.erase(It);
      break;
    }

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
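//
// A concrete instance (illustration only): to prove zext({1,+,4}) ==
// {zext(1),+,zext(4)}, pick T = 1. Condition (1) asks whether {0,+,4} + 1
// avoids unsigned overflow and condition (2) asks whether {0,+,4} itself is
// nuw; both hold in the motivating example above, so the extension
// distributes over the add recurrence.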
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}
// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
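//
// Numeric example (added for illustration): with 8-bit values, C = 0xA6 and
// x + y having at least 4 trailing zeros, D is the low 4 bits of C, i.e.
// D = 0x06. Then C - D = 0xA0 also has at least 4 trailing zeros, so adding
// D back cannot carry out of those low bits and the top-level addition
// cannot wrap.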
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.getMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}
// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.getMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
static void insertFoldCacheEntry(
    const ScalarEvolution::FoldID &ID, const SCEV *S,
    DenseMap<ScalarEvolution::FoldID, const SCEV *> &FoldCache,
    DenseMap<const SCEV *, SmallVector<ScalarEvolution::FoldID, 2>>
        &FoldCacheUser) {
  auto I = FoldCache.insert({ID, S});
  if (!I.second) {
    // Remove FoldCacheUser entry for ID when replacing an existing FoldCache
    // entry.
    auto &UserIDs = FoldCacheUser[I.first->second];
    assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
    for (unsigned I = 0; I != UserIDs.size(); ++I)
      if (UserIDs[I] == ID) {
        std::swap(UserIDs[I], UserIDs.back());
        break;
      }
    UserIDs.pop_back();
    I.first->second = S;
  }
  FoldCacheUser[S].push_back(ID);
}
const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldID ID(scZeroExtend, Op, Ty);
  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth);
  if (!isa<SCEVZeroExtendExpr>(S))
    insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
  return S;
}
1594 const SCEV
*ScalarEvolution::getZeroExtendExprImpl(const SCEV
*Op
, Type
*Ty
,
1596 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
1597 "This is not an extending conversion!");
1598 assert(isSCEVable(Ty
) && "This is not a conversion to a SCEVable type!");
1599 assert(!Op
->getType()->isPointerTy() && "Can't extend pointer!");
1601 // Fold if the operand is constant.
1602 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
1603 return getConstant(SC
->getAPInt().zext(getTypeSizeInBits(Ty
)));
1605 // zext(zext(x)) --> zext(x)
1606 if (const SCEVZeroExtendExpr
*SZ
= dyn_cast
<SCEVZeroExtendExpr
>(Op
))
1607 return getZeroExtendExpr(SZ
->getOperand(), Ty
, Depth
+ 1);
1609 // Before doing any expensive analysis, check to see if we've already
1610 // computed a SCEV for this Op and Ty.
1611 FoldingSetNodeID ID
;
1612 ID
.AddInteger(scZeroExtend
);
1616 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1617 if (Depth
> MaxCastDepth
) {
1618 SCEV
*S
= new (SCEVAllocator
) SCEVZeroExtendExpr(ID
.Intern(SCEVAllocator
),
1620 UniqueSCEVs
.InsertNode(S
, IP
);
1621 registerUser(S
, Op
);
1625 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1626 if (const SCEVTruncateExpr
*ST
= dyn_cast
<SCEVTruncateExpr
>(Op
)) {
1627 // It's possible the bits taken off by the truncate were all zero bits. If
1628 // so, we should be able to simplify this further.
1629 const SCEV
*X
= ST
->getOperand();
1630 ConstantRange CR
= getUnsignedRange(X
);
1631 unsigned TruncBits
= getTypeSizeInBits(ST
->getType());
1632 unsigned NewBits
= getTypeSizeInBits(Ty
);
1633 if (CR
.truncate(TruncBits
).zeroExtend(NewBits
).contains(
1634 CR
.zextOrTrunc(NewBits
)))
1635 return getTruncateOrZeroExtend(X
, Ty
, Depth
);
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap()) {
        Start =
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getZeroExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getZeroExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getSignExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {

        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
        if (AR->hasNoUnsignedWrap()) {
          // Same as nuw case above - duplicated here to avoid a compile time
          // issue.  It's not clear that the order of checks does matter, but
          // it's one of two possible causes for a change which was
          // reverted.  Be conservative for the moment.
          Start =
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
          Step = getZeroExtendExpr(Step, Ty, Depth + 1);
          return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
        }

        // For a negative step, we can extend the operands iff doing so only
        // traverses values in the range zext([0,UINT_MAX]).
        if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getSignExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }
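      // As a worked instance of the fold above (illustrative constants): for
      // {6,+,8}, Step contributes three trailing zero bits, so D = 6 and
      // zext({6,+,8}) becomes (6 + zext({0,+,8}))<nuw><nsw>, exposing the
      // highly-aligned residual recurrence.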
      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        Start =
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getZeroExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }
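  // Numeric instance of the proof above (illustrative): with K = 2 and
  // N = 16, zext(4 * (trunc X to i16)) to i32 becomes
  // (4 * (zext(trunc X to i14) to i32))<nuw>, since NewTruncBits = 16 - 2 = 14.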
  // zext(umin(x, y)) -> umin(zext(x), zext(y))
  // zext(umax(x, y)) -> umax(zext(x), zext(y))
  if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    for (auto *Operand : MinMax->operands())
      Operands.push_back(getZeroExtendExpr(Operand, Ty));
    if (isa<SCEVUMinExpr>(MinMax))
      return getUMinExpr(Operands);
    return getUMaxExpr(Operands);
  }

  // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y))
  if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
    assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
    SmallVector<const SCEV *, 4> Operands;
    for (auto *Operand : MinMax->operands())
      Operands.push_back(getZeroExtendExpr(Operand, Ty));
    return getUMinExpr(Operands, /*Sequential*/ true);
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}
const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldID ID(scSignExtend, Op, Ty);
  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth);
  if (!isa<SCEVSignExtendExpr>(S))
    insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
  return S;
}
const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
                                                   unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(SC->getAPInt().sext(getTypeSizeInBits(Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }
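  // A concrete instance of the fold above (illustrative, assuming %x is an
  // i16 with signed range [-100, 100]): truncating %x to i8 discards only
  // sign bits, so sext(trunc(%x to i8) to i32) simplifies to sext(%x to i32).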
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap()) {
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, SCEV::FlagNSW);
      }

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getSignExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1);
            Step = getZeroExtendExpr(Step, Ty, Depth + 1);
            return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
          }
        }
      }
      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      if (AR->hasNoSignedWrap()) {
        // Same as nsw case above - duplicated here to avoid a compile time
        // issue.  It's not clear that the order of checks does matter, but
        // it's one of two possible causes for a change which was
        // reverted.  Be conservative for the moment.
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        Start =
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);
        Step = getSignExtendExpr(Step, Ty, Depth + 1);
        return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // sext(smin(x, y)) -> smin(sext(x), sext(y))
  // sext(smax(x, y)) -> smax(sext(x), sext(y))
  if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    for (auto *Operand : MinMax->operands())
      Operands.push_back(getSignExtendExpr(Operand, Ty));
    if (isa<SCEVSMinExpr>(MinMax))
      return getSMinExpr(Operands);
    return getSMaxExpr(Operands);
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, { Op });
  return S;
}
const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
                                         Type *Ty) {
  switch (Kind) {
  case scTruncate:
    return getTruncateExpr(Op, Ty);
  case scZeroExtend:
    return getZeroExtendExpr(Op, Ty);
  case scSignExtend:
    return getSignExtendExpr(Op, Ty);
  case scPtrToInt:
    return getPtrToIntExpr(Op, Ty);
  default:
    llvm_unreachable("Not a SCEV cast expression!");
  }
}
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(SmallDenseMap<const SCEV *, APInt, 16> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             ArrayRef<const SCEV *> Ops, const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != Ops.size(); ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->operands(), NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}
bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
                                      const SCEV *LHS, const SCEV *RHS,
                                      const Instruction *CtxI) {
  const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
                                            SCEV::NoWrapFlags, unsigned);
  switch (BinOp) {
  default:
    llvm_unreachable("Unsupported binary op");
  case Instruction::Add:
    Operation = &ScalarEvolution::getAddExpr;
    break;
  case Instruction::Sub:
    Operation = &ScalarEvolution::getMinusSCEV;
    break;
  case Instruction::Mul:
    Operation = &ScalarEvolution::getMulExpr;
    break;
  }

  const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
      Signed ? &ScalarEvolution::getSignExtendExpr
             : &ScalarEvolution::getZeroExtendExpr;

  // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
  auto *NarrowTy = cast<IntegerType>(LHS->getType());
  auto *WideTy =
      IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);

  const SCEV *A = (this->*Extension)(
      (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
  const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
  const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
  const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0);
  if (A == B)
    return true;
  // Can we use context to prove the fact we need?
  if (!CtxI)
    return false;
  // TODO: Support mul.
  if (BinOp == Instruction::Mul)
    return false;
  auto *RHSC = dyn_cast<SCEVConstant>(RHS);
  // TODO: Lift this limitation.
  if (!RHSC)
    return false;
  APInt C = RHSC->getAPInt();
  unsigned NumBits = C.getBitWidth();
  bool IsSub = (BinOp == Instruction::Sub);
  bool IsNegativeConst = (Signed && C.isNegative());
  // Compute the direction and magnitude by which we need to check overflow.
  bool OverflowDown = IsSub ^ IsNegativeConst;
  APInt Magnitude = C;
  if (IsNegativeConst) {
    if (C == APInt::getSignedMinValue(NumBits))
      // TODO: SINT_MIN on inversion gives the same negative value, we don't
      // want to deal with that.
      return false;
    Magnitude = -C;
  }

  ICmpInst::Predicate Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  if (OverflowDown) {
    // To avoid overflow down, we need to make sure that MIN + Magnitude <= LHS.
    APInt Min = Signed ? APInt::getSignedMinValue(NumBits)
                       : APInt::getMinValue(NumBits);
    APInt Limit = Min + Magnitude;
    return isKnownPredicateAt(Pred, getConstant(Limit), LHS, CtxI);
  } else {
    // To avoid overflow up, we need to make sure that LHS <= MAX - Magnitude.
    APInt Max = Signed ? APInt::getSignedMaxValue(NumBits)
                       : APInt::getMaxValue(NumBits);
    APInt Limit = Max - Magnitude;
    return isKnownPredicateAt(Pred, LHS, getConstant(Limit), CtxI);
  }
}
std::optional<SCEV::NoWrapFlags>
ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
    const OverflowingBinaryOperator *OBO) {
  // It cannot be done any better.
  if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
    return std::nullopt;

  SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;

  if (OBO->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (OBO->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);

  bool Deduced = false;

  if (OBO->getOpcode() != Instruction::Add &&
      OBO->getOpcode() != Instruction::Sub &&
      OBO->getOpcode() != Instruction::Mul)
    return std::nullopt;

  const SCEV *LHS = getSCEV(OBO->getOperand(0));
  const SCEV *RHS = getSCEV(OBO->getOperand(1));

  const Instruction *CtxI =
      UseContextForNoWrapFlagInference ? dyn_cast<Instruction>(OBO) : nullptr;
  if (!OBO->hasNoUnsignedWrap() &&
      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
                      /* Signed */ false, LHS, RHS, CtxI)) {
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    Deduced = true;
  }

  if (!OBO->hasNoSignedWrap() &&
      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
                      /* Signed */ true, LHS, RHS, CtxI)) {
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    Deduced = true;
  }

  if (Deduced)
    return Flags;
  return std::nullopt;
}
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior.  Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  // <0,+,nonnegative><nw> is also nuw
  // TODO: Add corresponding nsw case
  if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) &&
      !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
      Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);

  // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW
  if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) &&
      Ops.size() == 2) {
    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
      if (UDiv->getOperand(1) == Ops[1])
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
      if (UDiv->getOperand(1) == Ops[0])
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  }

  return Flags;
}
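// For example (illustrative): the addrec {0,+,%n}<nw> with %n known
// non-negative satisfies the zero-start rule above, so it is strengthened to
// {0,+,%n}<nuw>.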
bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}
/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
  unsigned NumPtrs = count_if(
      Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
  assert(NumPtrs <= 1 && "add has at most one pointer operand");
#endif

  const SCEV *Folded = constantFoldAndGroupOps(
      *this, LI, DT, Ops,
      [](const APInt &C1, const APInt &C2) { return C1 + C2; },
      [](const APInt &C) { return C.isZero(); }, // identity
      [](const APInt &C) { return false; });     // absorber
  if (Folded)
    return Folded;

  unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once.  If so, merge them together into a multiply expression.  Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (const SCEV *Op : Ops) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Op)) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Op)) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  if (Ops.size() == 2) {
    // Check if we have an expression of the form ((X + C1) - C2), where C1 and
    // C2 can be folded in a way that allows retaining wrapping flags of (X +
    // C1).
    const SCEV *A = Ops[0];
    const SCEV *B = Ops[1];
    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
    auto *C = dyn_cast<SCEVConstant>(A);
    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
      auto C2 = C->getAPInt();
      SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;

      APInt ConstAdd = C1 + C2;
      auto AddFlags = AddExpr->getNoWrapFlags();
      // Adding a smaller constant is NUW if the original AddExpr was NUW.
      if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
          ConstAdd.ule(C1)) {
        PreservedFlags =
            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
      }

      // Adding a constant with the same sign and small magnitude is NSW, if the
      // original AddExpr was NSW.
      if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
          C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
          ConstAdd.abs().ule(C1.abs())) {
        PreservedFlags =
            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
      }

      if (PreservedFlags != SCEV::FlagAnyWrap) {
        SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
        NewOps[0] = getConstant(ConstAdd);
        return getAddExpr(NewOps, PreservedFlags);
      }
    }
  }
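  // For example (illustrative): folding (-3) + (10 + %x)<nuw> yields
  // ConstAdd = 7, and 7 u<= 10, so the NUW flag survives and the result is
  // (7 + %x)<nuw>.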
  // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
  if (Ops.size() == 2) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      const SCEV *X;
      const SCEV *Y;
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
        return getMulExpr(Y, getUDivExpr(X, Y));
      }
    }
  }
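  // For example (illustrative): with X = %a and Y = 5, (-1 * (%a urem 5)) + %a
  // becomes 5 * (%a /u 5); for %a == 17 both sides evaluate to 15.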
  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    // If the original flags and all inlined SCEVAddExprs are NUW, use the
    // common NUW flag for expression after inlining. Other flags cannot be
    // preserved, because they may depend on the original order of operations.
    SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      append_range(Ops, Add->operands());
      DeletedAdd = true;
      CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, CommonFlags, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    SmallDenseMap<const SCEV *, APInt, 16> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists) {
        if (MulOp.first == 1) {
          Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
        } else if (MulOp.first != 0) {
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
        }
      }
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(
                Mul->operands().take_front(MulOp));
            append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(
                  Mul->operands().take_front(MulOp));
              append_range(MulOps, Mul->operands().drop_front(MulOp+1));
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(
                  OtherMul->operands().take_front(OMulOp));
              append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // Compute nowrap flags for the addition of the loop-invariant ops and
      // the addrec. Temporarily push it as an operand for that purpose. These
      // flags are valid in the scope of the addrec only.
      LIOps.push_back(AddRec);
      SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
      LIOps.pop_back();

      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());

      // It is not in general safe to propagate flags valid on an add within
      // the addrec scope to one outside it.  We must prove that the inner
      // scope is guaranteed to execute if the outer one does to be able to
      // safely propagate.  We know the program is undefined if poison is
      // produced on the inner scoped addrec.  We also know that *for this use*
      // the outer scoped add can't overflow (because of the flags we just
      // computed for the inner scoped add) without the program being undefined.
      // Proving that entry to the outer scope necessitates entry to the inner
      // scope, thus proves the program undefined if the flags would be violated
      // in the outer scope.
      SCEV::NoWrapFlags AddFlags = Flags;
      if (AddFlags != SCEV::FlagAnyWrap) {
        auto *DefI = getDefiningScopeBound(LIOps);
        auto *ReachI = &*AddRecLoop->getHeader()->begin();
        if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
          AddFlags = SCEV::FlagAnyWrap;
      }
      AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}
const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Ops);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    LoopUsers[L].push_back(S);
    registerUser(S, Ops);
  }
  setNoWrapFlags(S, Flags);
  return S;
}
const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Ops);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}
/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we take the n-th term of the numerator and divide by the
  // (k-n)th term of the denominator.  This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  // Exploit symmetry to keep the loop as short as possible.
  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}
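// For example (illustrative): Choose(6, 2, Overflow) computes
// r = (1 * 6) / 1 = 6, then r = (6 * 5) / 2 = 15, returning C(6,2) = 15
// without setting Overflow.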
/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = Ops[0]->getType();
  assert(!ETy->isPointerTy());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(Ops[i]->getType() == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  const SCEV *Folded = constantFoldAndGroupOps(
      *this, LI, DT, Ops,
      [](const APInt &C1, const APInt &C2) { return C1 * C2; },
      [](const APInt &C) { return C.isOne(); },   // identity
      [](const APInt &C) { return C.isZero(); }); // absorber
  if (Folded)
    return Folded;

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit recursion call depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
          const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1);
          return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
        }

      if (Ops[0]->isAllOnesValue()) {
        // If we have a mul by -1 of an add, try distributing the -1 among the
        // add operands.
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));
          // Let M be the minimum representable signed value. An AddRec with
          // nsw multiplied by -1 can have signed overflow if and only if it
          // takes a value of M: M * (-1) would stay M and (M + 1) * (-1) would
          // be the maximum signed value. In all other cases signed overflow is
          // guaranteed not to happen.
          auto FlagsMask = SCEV::FlagNW;
          if (hasFlags(AddRec->getNoWrapFlags(), SCEV::FlagNSW)) {
            auto MinInt =
                APInt::getSignedMinValue(getTypeSizeInBits(AddRec->getType()));
            if (getSignedRangeMin(AddRec) != MinInt)
              FlagsMask = setFlags(FlagsMask, SCEV::FlagNSW);
          }
          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(FlagsMask));
        }
      }
    }
  }

  // Skip over the add expression until we get to a multiply.
  unsigned Idx = 0;
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      append_range(Ops, Mul->operands());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);

      // If both the mul and addrec are nuw, we can preserve nuw.
      // If both the mul and addrec are nsw, we can only preserve nsw if either
      // a) they are also nuw, or
      // b) all multiplications of addrec operands with scale are nsw.
      SCEV::NoWrapFlags Flags =
          AddRec->getNoWrapFlags(ComputeFlags({Scale, AddRec}));

      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

        if (hasFlags(Flags, SCEV::FlagNSW) && !hasFlags(Flags, SCEV::FlagNUW)) {
          ConstantRange NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
              Instruction::Mul, getSignedRange(Scale),
              OverflowingBinaryOperator::NoSignedWrap);
          if (!NSWRegion.contains(getSignedRange(AddRec->getOperand(i))))
            Flags = clearFlags(Flags, SCEV::FlagNSW);
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.
    //
    //   {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    //   = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //        choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    //
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
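    //
    // For instance, with two affine recurrences this reduces to
    //   {a,+,b}<L> * {c,+,d}<L> = {a*c,+,a*d+b*c+b*d,+,2*b*d}<L>
    // because (a+b*n)*(c+d*n) = a*c + (a*d+b*c+b*d)*n + 2*b*d*(n*(n-1)/2).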
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRec->getLoop())
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }
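
  // For example, for i32 %x, "%x urem 8" depends only on the low three bits,
  // so it folds to (zext i3 (trunc i32 %x to i3) to i32).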

  // Fall back to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(!LHS->getType()->isPointerTy() &&
         "SCEVUDivExpr operand can't be pointer!");
  assert(LHS->getType() == RHS->getType() &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // 0 udiv Y == 0
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
    if (LHSC->getValue()->isZero())
      return LHS;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of
      // its operands.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countl_zero();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
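          // For example, {0,+,4}<L> /u 2 folds to {0,+,2}<L> here, since the
          // step divides evenly and the zero-extended forms match.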
          /// Get a canonical UDivExpr for a recurrence.
          /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }

      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->operands());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
        return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
    }
  }

  // ((-C + (C smax %x)) /u %x) evaluates to zero, for any positive constant C.
  if (const auto *AE = dyn_cast<SCEVAddExpr>(LHS);
      AE && AE->getNumOperands() == 2) {
    if (const auto *VC = dyn_cast<SCEVConstant>(AE->getOperand(0))) {
      const APInt &NegC = VC->getAPInt();
      if (NegC.isNegative() && !NegC.isMinSignedValue()) {
        const auto *MME = dyn_cast<SCEVSMaxExpr>(AE->getOperand(1));
        if (MME && MME->getNumOperands() == 2 &&
            isa<SCEVConstant>(MME->getOperand(0)) &&
            cast<SCEVConstant>(MME->getOperand(0))->getAPInt() == -NegC &&
            MME->getOperand(1) == RHS)
          return getZero(LHS->getType());
      }
    }
  }

  // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
  // changes). Make sure we get a new one.
  IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, {LHS, RHS});
  return S;
}

APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        append_range(Operands, Mul->operands().drop_front());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      append_range(Operands, Mul->operands().take_front(i));
      append_range(Operands, Mul->operands().drop_front(i + 1));
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      append_range(Operands, StepChrec->operands());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (const SCEV *Op : llvm::drop_begin(Operands)) {
    assert(getEffectiveSCEVType(Op->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
    assert(!Op->getType()->isPointerTy() && "Step must be integer");
  }
  for (const SCEV *Op : Operands)
    assert(isAvailableAtLoopEntry(Op, L) &&
           "SCEVAddRecExpr operand is not available at loop entry!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
  GEPNoWrapFlags NW = GEP->getNoWrapFlags();
  if (NW != GEPNoWrapFlags::none()) {
    // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
    // but to do that, we have to ensure that said flag is valid in the entire
    // defined scope of the SCEV.
    // TODO: non-instructions have global scope. We might be able to prove some
    // global scope cases.
    auto *GEPI = dyn_cast<Instruction>(GEP);
    if (!GEPI || !isSCEVExprNeverPoison(GEPI))
      NW = GEPNoWrapFlags::none();
  }

  SCEV::NoWrapFlags OffsetWrap = SCEV::FlagAnyWrap;
  if (NW.hasNoUnsignedSignedWrap())
    OffsetWrap = setFlags(OffsetWrap, SCEV::FlagNSW);
  if (NW.hasNoUnsignedWrap())
    OffsetWrap = setFlags(OffsetWrap, SCEV::FlagNUW);

  Type *CurTy = GEP->getType();
  bool FirstIter = true;
  SmallVector<const SCEV *, 4> Offsets;
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
      Offsets.push_back(FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      if (FirstIter) {
        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();
        FirstIter = false;
      } else {
        CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
      }
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);
    }
  }

  // Handle degenerate case of GEP without offsets.
  if (Offsets.empty())
    return BaseExpr;

  // Add the offsets together, assuming nsw if inbounds.
  const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
  // Add the base address and the offset. We cannot use the nsw flag, as the
  // base address is unsigned. However, if we know that the offset is
  // non-negative, we can use nuw.
  bool NUW = NW.hasNoUnsignedWrap() ||
             (NW.hasNoUnsignedSignedWrap() && isKnownNonNegative(Offset));
  SCEV::NoWrapFlags BaseWrap = NUW ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
  auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
  assert(BaseExpr->getType() == GEPExpr->getType() &&
         "GEP should not change type mid-flight.");
  return GEPExpr;
}

SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
                                               ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  ID.AddInteger(SCEVType);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
}

const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
  SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}

const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
    assert(Ops[0]->getType()->isPointerTy() ==
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");
  }
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  const SCEV *Folded = constantFoldAndGroupOps(
      *this, LI, DT, Ops,
      [&](const APInt &C1, const APInt &C2) {
        switch (Kind) {
        case scSMaxExpr:
          return APIntOps::smax(C1, C2);
        case scSMinExpr:
          return APIntOps::smin(C1, C2);
        case scUMaxExpr:
          return APIntOps::umax(C1, C2);
        case scUMinExpr:
          return APIntOps::umin(C1, C2);
        default:
          llvm_unreachable("Unknown SCEV min/max opcode");
        }
      },
      [&](const APInt &C) {
        // identity
        if (IsMax)
          return IsSigned ? C.isMinSignedValue() : C.isMinValue();
        else
          return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
      },
      [&](const APInt &C) {
        // absorber
        if (IsMax)
          return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
        else
          return IsSigned ? C.isMinSignedValue() : C.isMinValue();
      });
  if (Folded)
    return Folded;

  // Check if we have created the same expression before.
  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
    return S;
  }

  // Find the first operation of the same kind.
  unsigned Idx = 0;
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      append_range(Ops, SMME->operands());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y  -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(Kind);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Ops);
  return S;
}

namespace {

class SCEVSequentialMinMaxDeduplicatingVisitor final
    : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
                         std::optional<const SCEV *>> {
  using RetVal = std::optional<const SCEV *>;
  using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>;

  ScalarEvolution &SE;
  const SCEVTypes RootKind; // Must be a sequential min/max expression.
  const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind.
  SmallPtrSet<const SCEV *, 16> SeenOps;

  bool canRecurseInto(SCEVTypes Kind) const {
    // We can only recurse into the SCEV expression of the same effective type
    // as the type of our root SCEV expression.
    return RootKind == Kind || NonSequentialRootKind == Kind;
  }

  RetVal visitAnyMinMaxExpr(const SCEV *S) {
    assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
           "Only for min/max expressions.");
    SCEVTypes Kind = S->getSCEVType();

    if (!canRecurseInto(Kind))
      return S;

    auto *NAry = cast<SCEVNAryExpr>(S);
    SmallVector<const SCEV *> NewOps;
    bool Changed = visit(Kind, NAry->operands(), NewOps);

    if (!Changed)
      return S;
    if (NewOps.empty())
      return std::nullopt;

    return isa<SCEVSequentialMinMaxExpr>(S)
               ? SE.getSequentialMinMaxExpr(Kind, NewOps)
               : SE.getMinMaxExpr(Kind, NewOps);
  }

  RetVal visit(const SCEV *S) {
    // Has the whole operand been seen already?
    if (!SeenOps.insert(S).second)
      return std::nullopt;
    return Base::visit(S);
  }

public:
&SE
,
4017 : SE(SE
), RootKind(RootKind
),
4018 NonSequentialRootKind(
4019 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4022 bool /*Changed*/ visit(SCEVTypes Kind
, ArrayRef
<const SCEV
*> OrigOps
,
4023 SmallVectorImpl
<const SCEV
*> &NewOps
) {
4024 bool Changed
= false;
4025 SmallVector
<const SCEV
*> Ops
;
4026 Ops
.reserve(OrigOps
.size());
4028 for (const SCEV
*Op
: OrigOps
) {
4029 RetVal NewOp
= visit(Op
);
4033 Ops
.emplace_back(*NewOp
);
4037 NewOps
= std::move(Ops
);

  RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }

  RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }

  RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }

  RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }

  RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; }

  RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; }

  RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }

  RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }

  RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }

  RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) {
    return visitAnyMinMaxExpr(Expr);
  }

  RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) {
    return visitAnyMinMaxExpr(Expr);
  }

  RetVal visitSMinExpr(const SCEVSMinExpr *Expr) {
    return visitAnyMinMaxExpr(Expr);
  }

  RetVal visitUMinExpr(const SCEVUMinExpr *Expr) {
    return visitAnyMinMaxExpr(Expr);
  }

  RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
    return visitAnyMinMaxExpr(Expr);
  }

  RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }

  RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
};

} // namespace

static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind) {
  switch (Kind) {
  case scConstant:
  case scVScale:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scAddRecExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scUnknown:
    // If any operand is poison, the whole expression is poison.
    return true;
  case scSequentialUMinExpr:
    // FIXME: if the *first* operand is poison, the whole expression is poison.
    return false; // Pessimistically, say that it does not propagate poison.
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

namespace {
// The only way poison may be introduced in a SCEV expression is from a
// poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown,
// not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not*
// introduce poison -- they encode guaranteed, non-speculated knowledge.
//
// Additionally, all SCEV nodes propagate poison from inputs to outputs,
// with the notable exception of umin_seq, where only poison from the first
// operand is (unconditionally) propagated.
struct SCEVPoisonCollector {
  bool LookThroughMaybePoisonBlocking;
  SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
  SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
      : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}

  bool follow(const SCEV *S) {
    if (!LookThroughMaybePoisonBlocking &&
        !scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType()))
      return false;

    if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
      if (!isGuaranteedNotToBePoison(SU->getValue()))
        MaybePoison.insert(SU);
    }
    return true;
  }
  bool isDone() const { return false; }
};
} // namespace

/// Return true if V is poison given that AssumedPoison is already poison.
static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) {
  // First collect all SCEVs that might cause AssumedPoison to be poison.
  // We need to look through potentially poison-blocking operations here,
  // because we want to find all SCEVs that *might* result in poison, not only
  // those that are *required* to.
  SCEVPoisonCollector PC1(/* LookThroughMaybePoisonBlocking */ true);
  visitAll(AssumedPoison, PC1);

  // If AssumedPoison can never be poison, the assumption is false, so the
  // implication holds trivially. Don't bother walking the other SCEV in this
  // case.
  if (PC1.MaybePoison.empty())
    return true;

  // Collect all SCEVs in S that, if poison, *will* result in S being poison
  // as well. We cannot look through potentially poison-blocking operations
  // here, as their arguments only *may* make the result poison.
  SCEVPoisonCollector PC2(/* LookThroughMaybePoisonBlocking */ false);
  visitAll(S, PC2);

  // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison,
  // it will also make S poison by being part of PC2.MaybePoison.
  return llvm::set_is_subset(PC1.MaybePoison, PC2.MaybePoison);
}
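
// Example for impliesPoison(): with AssumedPoison = (%x + %y) and S = (%x * 3),
// %x being poison makes both expressions poison, but %y being poison says
// nothing about S, so the subset check {%x, %y} <= {%x} fails and no
// implication is drawn.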

void ScalarEvolution::getPoisonGeneratingValues(
    SmallPtrSetImpl<const Value *> &Result, const SCEV *S) {
  SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ false);
  visitAll(S, PC);
  for (const SCEVUnknown *SU : PC.MaybePoison)
    Result.insert(SU->getValue());
}

bool ScalarEvolution::canReuseInstruction(
    const SCEV *S, Instruction *I,
    SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
  // If the instruction cannot be poison, it's always safe to reuse.
  if (programUndefinedIfPoison(I))
    return true;

  // Otherwise, it is possible that I is more poisonous than S. Collect the
  // poison-contributors of S, and then check whether I has any additional
  // poison-contributors. Poison that is contributed through poison-generating
  // flags is handled by dropping those flags instead.
  SmallPtrSet<const Value *, 8> PoisonVals;
  getPoisonGeneratingValues(PoisonVals, S);

  SmallVector<Value *> Worklist;
  SmallPtrSet<Value *, 8> Visited;
  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // Avoid walking large instruction graphs.
    if (Visited.size() > 16)
      return false;

    // Either the value can't be poison, or S would also be poison if it
    // is.
    if (PoisonVals.contains(V) || ::isGuaranteedNotToBePoison(V))
      continue;

    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      return false;

    // Disjoint or instructions are interpreted as adds by SCEV. However, we
    // can't replace an arbitrary add with disjoint or, even if we drop the
    // flag. We would need to convert the or into an add.
    if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
      if (PDI->isDisjoint())
        return false;

    // FIXME: Ignore vscale, even though it technically could be poison. Do
    // this because SCEV currently assumes it can't be poison. Remove this
    // special case once we properly model when vscale can be poison.
    if (auto *II = dyn_cast<IntrinsicInst>(I);
        II && II->getIntrinsicID() == Intrinsic::vscale)
      continue;

    if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
      return false;

    // If the instruction can't create poison, we can recurse to its operands.
    if (I->hasPoisonGeneratingAnnotations())
      DropPoisonGeneratingInsts.push_back(I);

    for (Value *Op : I->operands())
      Worklist.push_back(Op);
  }
  return true;
}

const SCEV *
ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
                                         SmallVectorImpl<const SCEV *> &Ops) {
  assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
         "Not a SCEVSequentialMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)
    return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
    assert(Ops[0]->getType()->isPointerTy() ==
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");
  }
#endif

  // Note that SCEVSequentialMinMaxExpr is *NOT* commutative,
  // so we can *NOT* do any kind of sorting of the expressions!
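  //
  // For instance, (0 umin_seq poison) is 0, while (poison umin_seq 0) is
  // poison: evaluation stops at the first saturating operand, so reordering
  // operands changes semantics.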

  // Check if we have created the same expression before.
  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
    return S;

  // FIXME: there are *some* simplifications that we can do here.

  // Keep only the first instance of an operand.
  {
    SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
    bool Changed = Deduplicator.visit(Kind, Ops, Ops);
    if (Changed)
      return getSequentialMinMaxExpr(Kind, Ops);
  }

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  {
    unsigned Idx = 0;
    bool DeletedAny = false;
    while (Idx < Ops.size()) {
      if (Ops[Idx]->getSCEVType() != Kind) {
        ++Idx;
        continue;
      }
      const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin() + Idx);
      Ops.insert(Ops.begin() + Idx, SMME->operands().begin(),
                 SMME->operands().end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getSequentialMinMaxExpr(Kind, Ops);
  }

  const SCEV *SaturationPoint;
  ICmpInst::Predicate Pred;
  switch (Kind) {
  case scSequentialUMinExpr:
    SaturationPoint = getZero(Ops[0]->getType());
    Pred = ICmpInst::ICMP_ULE;
    break;
  default:
    llvm_unreachable("Not a sequential min/max type.");
  }

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    bool MayBeUB = SCEVExprContains(Ops[i], [this](const SCEV *S) {
      auto *UDiv = dyn_cast<SCEVUDivExpr>(S);
      // The UDiv may be UB if the divisor is poison or zero. Unless the
      // divisor is a non-zero constant, we have to assume the UDiv may be UB.
      return UDiv && (!isKnownNonZero(UDiv->getOperand(1)) ||
                      !isGuaranteedNotToBePoison(UDiv->getOperand(1)));
    });
    if (MayBeUB)
      continue;

    // We can replace %x umin_seq %y with %x umin %y if either:
    //  * %y being poison implies %x is also poison.
    //  * %x cannot be the saturating value (e.g. zero for umin).
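    //
    // For instance, when %x is known non-zero it can never hit the zero
    // saturation point, so %x umin_seq %y behaves exactly like %x umin %y.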
    if (::impliesPoison(Ops[i], Ops[i - 1]) ||
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
                                        SaturationPoint)) {
      SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]};
      Ops[i - 1] = getMinMaxExpr(
          SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
          SeqOps);
      Ops.erase(Ops.begin() + i);
      return getSequentialMinMaxExpr(Kind, Ops);
    }

    // Fold %x umin_seq %y to %x if %x ule %y.
    // TODO: We might be able to prove the predicate for a later operand.
    if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
      Ops.erase(Ops.begin() + i);
      return getSequentialMinMaxExpr(Kind, Ops);
    }
  }
4342 // already have one, otherwise create a new one.
4343 FoldingSetNodeID ID
;
4344 ID
.AddInteger(Kind
);
4345 for (const SCEV
*Op
: Ops
)
4348 const SCEV
*ExistingSCEV
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
);
4350 return ExistingSCEV
;
4352 const SCEV
**O
= SCEVAllocator
.Allocate
<const SCEV
*>(Ops
.size());
4353 std::uninitialized_copy(Ops
.begin(), Ops
.end(), O
);
4354 SCEV
*S
= new (SCEVAllocator
)
4355 SCEVSequentialMinMaxExpr(ID
.Intern(SCEVAllocator
), Kind
, O
, Ops
.size());
4357 UniqueSCEVs
.InsertNode(S
, IP
);
4358 registerUser(S
, Ops
);

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
                                         bool Sequential) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMinExpr(Ops, Sequential);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
                                         bool Sequential) {
  return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
                    : getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *
ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) {
  const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue());
  if (Size.isScalable())
    Res = getMulExpr(Res, getVScale(IntTy));
  return Res;
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  return getSizeOfExpr(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
  return getSizeOfExpr(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  const StructLayout *SL = getDataLayout().getStructLayout(STy);
  assert(!SL->getSizeInBits().isScalable() &&
         "Cannot get offset for structure containing scalable vector types");
  return getConstant(IntTy, SL->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
                                                        const SCEV *B) {
  // For a valid use point to exist, the defining scope of one operand
  // must dominate the other.
  bool PreciseA, PreciseB;
  auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
  auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
  if (!PreciseA || !PreciseB)
    // Can't tell.
    return false;
  return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
         DT.dominates(ScopeB, ScopeA);
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec =
      SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return {};
  return SI->second.getArrayRef();
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    auto EVIt = ExprValueMap.find(I->second);
    bool Removed = EVIt->second.remove(V);
    (void) Removed;
    assert(Removed && "Value not in ExprValueMap?");
    ValueExprMap.erase(I);
  }
}

void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
  // A recursive query may have already computed the SCEV. It should be
  // equivalent, but may not necessarily be exactly the same, e.g. due to
  // lazily inferred nowrap flags.
  auto It = ValueExprMap.find_as(V);
  if (It == ValueExprMap.end()) {
    ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    ExprValueMap[S].insert(V);
  }
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  if (const SCEV *S = getExistingSCEV(V))
    return S;
  return createSCEVIter(V);
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    assert(checkValidity(S) &&
           "existing SCEV has not been properly invalidated");
    return S;
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V, getMinusOne(Ty), Flags);
}

/// If Expr computes ~A, return A else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  assert(!V->getType()->isPointerTy() && "Can't negate pointer");

  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
                           MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMinusSCEV(getMinusOne(Ty), V);
}

const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
  assert(P->getType()->isPointerTy());

  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
    // The base of an AddRec is the first operand.
    SmallVector<const SCEV *> Ops{AddRec->operands()};
    Ops[0] = removePointerBase(Ops[0]);
    // Don't try to transfer nowrap flags for now. We could in some cases
    // (for example, if the pointer operand of the AddRec is a SCEVUnknown).
    return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }
  if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
    // The base of an Add is the pointer operand.
    SmallVector<const SCEV *> Ops{Add->operands()};
    const SCEV **PtrOp = nullptr;
    for (const SCEV *&AddOp : Ops) {
      if (AddOp->getType()->isPointerTy()) {
        assert(!PtrOp && "Cannot have multiple pointer ops");
        PtrOp = &AddOp;
      }
    }
    *PtrOp = removePointerBase(*PtrOp);
    // Don't try to transfer nowrap flags for now. We could in some cases
    // (for example, if the pointer operand of the Add is a SCEVUnknown).
    return getAddExpr(Ops);
  }
  // Any other expression must be a pointer base.
  return getZero(P->getType());
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // If we subtract two pointers with different pointer bases, bail.
  // Eventually, we're going to add an assertion to getMulExpr that we
  // can't multiply by a pointer.
  if (RHS->getType()->isPointerTy()) {
    if (!LHS->getType()->isPointerTy() ||
        getPointerBase(LHS) != getPointerBase(RHS))
      return getCouldNotCompute();
    LHS = removePointerBase(LHS);
    RHS = removePointerBase(RHS);
  }

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (hasFlags(Flags, SCEV::FlagNSW)) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
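    //
    // For example, for i8 the minimum M is -128: the subtraction
    // (-1) - (-128) = 127 is NSW, yet (-1) * (-128) wraps back to -128.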
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}
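// Usage sketch (illustrative): this helper picks whichever cast preserves
// the unsigned value when the widths differ, e.g. for an i32 expression %x:
//   getTruncateOrZeroExtend(%x, i32) == %x           (same width, no-op)
//   getTruncateOrZeroExtend(%x, i16) == (trunc %x)   (narrowing)
//   getTruncateOrZeroExtend(%x, i64) == (zext %x)    (widening)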
const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getZeroExtendExpr(V, Ty);
}
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getSignExtendExpr(V, Ty);
}
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getAnyExtendExpr(V, Ty);
}
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getTruncateExpr(V, Ty);
}
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS,
                                                        bool Sequential) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops, Sequential);
}
const SCEV *
ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
                                            bool Sequential) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (const auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (const auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps, Sequential);
}
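// Illustrative sketch: for operands of mixed widths, e.g. an i32 %a and an
// i64 %b, the max type is i64, so the result is
//   umin(zext i32 %a to i64, %b)
// Zero-extension is safe here because umin compares unsigned values.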
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
      V = AddRec->getStart();
    } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *AddOp : Add->operands()) {
        if (AddOp->getType()->isPointerTy()) {
          assert(!PtrOp && "Cannot have multiple pointer ops");
          PtrOp = AddOp;
        }
      }
      assert(PtrOp && "Must have pointer op");
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}
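// Illustrative sketch: getPointerBase peels AddRecs and Adds down to the
// underlying pointer, e.g.
//   getPointerBase({%p,+,4}<%L> + 8)  ==>  %p
// so two pointer expressions can be compared by their common base.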
/// Push users of the given Instruction onto the given Worklist.
static void PushDefUseChildren(Instruction *I,
                               SmallVectorImpl<Instruction *> &Worklist,
                               SmallPtrSetImpl<Instruction *> &Visited) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users()) {
    auto *UserInsn = cast<Instruction>(U);
    if (Visited.insert(UserInsn).second)
      Worklist.push_back(UserInsn);
  }
}
namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression in case its Loop is L. If it is not L then
/// if IgnoreOtherLoops is true then use AddRec itself
/// otherwise rewrite cannot be done.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
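// Illustrative sketch of the rewrite: for a loop %L,
//   SCEVInitRewriter::rewrite({%start,+,%step}<%L>, %L, SE) == %start
// An AddRec of a different loop is left untouched when IgnoreOtherLoops is
// true, and turns the result into SCEVCouldNotCompute otherwise.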
/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
/// increment expression in case its Loop is L. If it is not L then
/// use AddRec itself.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
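// Illustrative sketch: the post-increment form adds one step to the start,
//   SCEVPostIncRewriter::rewrite({%start,+,%step}<%L>, %L, SE)
//     == {%start+%step,+,%step}<%L>
// i.e. the value of the recurrence after the increment on each iteration.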
/// This class evaluates the compare condition by matching it against the
/// condition of loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        std::optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res) {
          bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        std::optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res)
          Result = *Res;
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

std::optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If value matches the backedge condition for loop latch,
  // then return a constant evolution node based on loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return std::nullopt;
}
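// Illustrative sketch (not from the upstream source): suppose the latch of
// %L branches back to the header on %cond. Then, while folding the backedge
// value, a select on that very same condition
//   %v = select i1 %cond, i32 %a, i32 %b
// is built as SCEV(%a), because %cond is known true whenever the backedge
// is taken (and as SCEV(%b) if the backedge is the false edge).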
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace
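// Illustrative sketch: SCEVShiftRewriter shifts a recurrence back by one
// iteration by subtracting the step, e.g. for loop %L:
//   rewrite({1,+,1}<%L>, %L, SE) == {0,+,1}<%L>
// This supports the PHI(f(0), f({1,+,1})) --> f({0,+,1}) generalization
// used in createAddRecFromPHI below.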
SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSelfWrap()) {
    const SCEV *BECount = getConstantMaxBackedgeTakenCount(AR->getLoop());
    if (const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
      ConstantRange StepCR = getSignedRange(AR->getStepRecurrence(*this));
      const APInt &BECountAP = BECountMax->getAPInt();
      unsigned NoOverflowBitWidth =
          BECountAP.getActiveBits() + StepCR.getMinSignedBits();
      if (NoOverflowBitWidth <= getTypeSizeInBits(AR->getType()))
        Result = ScalarEvolution::setFlags(Result, SCEV::FlagNW);
    }
  }

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
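// Worked example for the self-wrap check above (illustrative): with an i32
// AddRec, a constant max backedge-taken count of 1000 (10 active bits) and
// a step whose signed range needs at most 8 bits, the accumulated magnitude
// fits in 10 + 8 = 18 <= 32 bits, so the recurrence can never return to its
// start value and FlagNW can be set.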
SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  // This function can be expensive, only try to prove NSW once per AddRec.
  if (!SignedWrapViaInductionTried.insert(AR).second)
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}
SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  // This function can be expensive, only try to prove NUW once per AddRec.
  if (!UnsignedWrapViaInductionTried.insert(AR).second)
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}
namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace
/// Try to map \p V into a BinaryOp, and return \c std::nullopt on failure.
static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL,
                                             AssumptionCache &AC,
                                             const DominatorTree &DT,
                                             const Instruction *CxtI) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return std::nullopt;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Or: {
    // Convert or disjoint into add nuw nsw.
    if (cast<PossiblyDisjointInst>(Op)->isDisjoint())
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1),
                      /*IsNSW=*/true, /*IsNUW=*/true);
    return BinaryOp(Op);
  }

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    // Binary `xor` is a bit-wise `add`.
    if (V->getType()->isIntegerTy(1))
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise intrinsic loop.decrement.reg, and as this has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return std::nullopt;
}
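// Illustrative examples of the mapping (not exhaustive):
//   %a = lshr i32 %x, 3           -->  BinaryOp(UDiv, %x, 8)
//   %b = xor i32 %x, -2147483648  -->  BinaryOp(Add, %x, signmask)
//   %c = or disjoint i32 %x, 1    -->  BinaryOp(Add, %x, 1) with nuw/nsw
// Each rewrite preserves the value while exposing arithmetic SCEV can model.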
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
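// Illustrative match (assuming an i64 phi %X): for
//   Op == (sext i32 (trunc i64 %X to i32) to i64)
// the function returns i32 and sets Signed = true; the overall bit width is
// unchanged (i64 -> i32 -> i64), only the middle of the chain is narrowed.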
static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}
// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which correspond to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which correspond to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return std::nullopt;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return std::nullopt;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return std::nullopt;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return std::nullopt;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //   Expr(i+1) =
  //             = Start + (i+1)*Accum
  //             = (Start + i*Accum) + Accum
  //             = Expr(i) + Accum
  //             = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                            :: from step i
  //
  //             = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //               + Accum + Accum
  //
  //             = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //               + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //               + Accum                                       :: from P3
  //
  //             = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //               + Accum                   :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //             = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //             = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:
  //

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //  ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy
  //               = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return std::nullopt;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return std::nullopt;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // the predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}
std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return std::nullopt;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return std::nullopt;
    // Analysis was done before and succeeded to create an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return std::nullopt;
  }

  return Rewrite;
}
// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds->implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}
/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  insertValueToMap(PN, PHISCEV);

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR),
                   (SCEV::NoWrapFlags)(AR->getNoWrapFlags() |
                                       proveNoWrapViaConstantRanges(AR)));
  }

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
    assert(isLoopInvariant(Accum, L) &&
           "Accum is defined outside L, but is not invariant?");
    if (isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
  }

  return PHISCEV;
}
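// Illustrative IR for the simple case handled above (hypothetical names):
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw nsw i64 %iv, 1
// Here BEValueV is %iv.next, StartValueV is 0, Accum is 1, and the phi
// becomes {0,+,1}<nuw><nsw> for the loop.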
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find AddRec expression without creating a fictitious symbolic
  // value (SymbolicName).
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  insertValueToMap(PN, SymbolicName);

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          if (GEP->getOperand(0) == PN) {
            GEPNoWrapFlags NW = GEP->getNoWrapFlags();
            // If the increment has any nowrap flags, then we know the address
            // space cannot be wrapped around.
            if (NW != GEPNoWrapFlags::none())
              Flags = setFlags(Flags, SCEV::FlagNW);
            // If the GEP is nuw or nusw with non-negative offset, we know that
            // no unsigned wrap occurs. We cannot set the nsw flag as only the
            // offset is treated as signed, while the base is unsigned.
            if (NW.hasNoUnsignedWrap() ||
                (NW.hasNoUnsignedSignedWrap() && isKnownNonNegative(Accum)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, PHISCEV);

        if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
          setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR),
                         (SCEV::NoWrapFlags)(AR->getNoWrapFlags() |
                                             proveNoWrapViaConstantRanges(AR)));
        }

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue
    // i really is an addrec evolution.
    //
    // We can generalize this saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, Shifted);
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}
// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}
const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        properlyDominates(getSCEV(LHS), PN->getParent()) &&
        properlyDominates(getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  // We do not allow simplifying phi (undef, X) to X here, to avoid reusing the
  // phi node for X.
  if (Value *V = simplifyInstruction(
          PN, {getDataLayout(), &TLI, &DT, &AC, /*CtxI=*/nullptr,
               /*UseInstrInfo=*/true, /*CanUseUndef=*/false}))
    return getSCEV(V);

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
                            SCEVTypes RootKind) {
  struct FindClosure {
    const SCEV *OperandToFind;
    const SCEVTypes RootKind; // Must be a sequential min/max expression.
    const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind.

    bool Found = false;

    bool canRecurseInto(SCEVTypes Kind) const {
      // We can only recurse into the SCEV expression of the same effective type
      // as the type of our root SCEV expression, and into zero-extensions.
      return RootKind == Kind || NonSequentialRootKind == Kind ||
             scZeroExtend == Kind;
    }

    FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind)
        : OperandToFind(OperandToFind), RootKind(RootKind),
          NonSequentialRootKind(
              SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
                  RootKind)) {}

    bool follow(const SCEV *S) {
      Found = S == OperandToFind;

      return !isDone() && canRecurseInto(S->getSCEVType());
    }

    bool isDone() const { return Found; }
  };

  FindClosure FC(OperandToFind, RootKind);
  visitAll(Root, FC);
  return FC.Found;
}
<const SCEV
*>
6081 ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type
*Ty
,
6085 // Try to match some simple smax or umax patterns.
6088 Value
*LHS
= ICI
->getOperand(0);
6089 Value
*RHS
= ICI
->getOperand(1);
6091 switch (ICI
->getPredicate()) {
6092 case ICmpInst::ICMP_SLT
:
6093 case ICmpInst::ICMP_SLE
:
6094 case ICmpInst::ICMP_ULT
:
6095 case ICmpInst::ICMP_ULE
:
6096 std::swap(LHS
, RHS
);
6098 case ICmpInst::ICMP_SGT
:
6099 case ICmpInst::ICMP_SGE
:
6100 case ICmpInst::ICMP_UGT
:
6101 case ICmpInst::ICMP_UGE
:
6102 // a > b ? a+x : b+x -> max(a, b)+x
6103 // a > b ? b+x : a+x -> min(a, b)+x
6104 if (getTypeSizeInBits(LHS
->getType()) <= getTypeSizeInBits(Ty
)) {
6105 bool Signed
= ICI
->isSigned();
6106 const SCEV
*LA
= getSCEV(TrueVal
);
6107 const SCEV
*RA
= getSCEV(FalseVal
);
6108 const SCEV
*LS
= getSCEV(LHS
);
6109 const SCEV
*RS
= getSCEV(RHS
);
6110 if (LA
->getType()->isPointerTy()) {
6111 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
6112 // Need to make sure we can't produce weird expressions involving
6113 // negated pointers.
6114 if (LA
== LS
&& RA
== RS
)
6115 return Signed
? getSMaxExpr(LS
, RS
) : getUMaxExpr(LS
, RS
);
6116 if (LA
== RS
&& RA
== LS
)
6117 return Signed
? getSMinExpr(LS
, RS
) : getUMinExpr(LS
, RS
);
6119 auto CoerceOperand
= [&](const SCEV
*Op
) -> const SCEV
* {
6120 if (Op
->getType()->isPointerTy()) {
6121 Op
= getLosslessPtrToIntExpr(Op
);
6122 if (isa
<SCEVCouldNotCompute
>(Op
))
6126 Op
= getNoopOrSignExtend(Op
, Ty
);
6128 Op
= getNoopOrZeroExtend(Op
, Ty
);
6131 LS
= CoerceOperand(LS
);
6132 RS
= CoerceOperand(RS
);
6133 if (isa
<SCEVCouldNotCompute
>(LS
) || isa
<SCEVCouldNotCompute
>(RS
))
6135 const SCEV
*LDiff
= getMinusSCEV(LA
, LS
);
6136 const SCEV
*RDiff
= getMinusSCEV(RA
, RS
);
6138 return getAddExpr(Signed
? getSMaxExpr(LS
, RS
) : getUMaxExpr(LS
, RS
),
6140 LDiff
= getMinusSCEV(LA
, RS
);
6141 RDiff
= getMinusSCEV(RA
, LS
);
6143 return getAddExpr(Signed
? getSMinExpr(LS
, RS
) : getUMinExpr(LS
, RS
),
6147 case ICmpInst::ICMP_NE
:
6148 // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y
6149 std::swap(TrueVal
, FalseVal
);
6151 case ICmpInst::ICMP_EQ
:
6152 // x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1
6153 if (getTypeSizeInBits(LHS
->getType()) <= getTypeSizeInBits(Ty
) &&
6154 isa
<ConstantInt
>(RHS
) && cast
<ConstantInt
>(RHS
)->isZero()) {
6155 const SCEV
*X
= getNoopOrZeroExtend(getSCEV(LHS
), Ty
);
6156 const SCEV
*TrueValExpr
= getSCEV(TrueVal
); // C+y
6157 const SCEV
*FalseValExpr
= getSCEV(FalseVal
); // x+y
6158 const SCEV
*Y
= getMinusSCEV(FalseValExpr
, X
); // y = (x+y)-x
6159 const SCEV
*C
= getMinusSCEV(TrueValExpr
, Y
); // C = (C+y)-y
6160 if (isa
<SCEVConstant
>(C
) && cast
<SCEVConstant
>(C
)->getAPInt().ule(1))
6161 return getAddExpr(getUMaxExpr(X
, C
), Y
);
6163 // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...))
6164 // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...))
6165 // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...)
6166 // -> umin_seq(x, umin (..., umin_seq(...), ...))
6167 if (isa
<ConstantInt
>(RHS
) && cast
<ConstantInt
>(RHS
)->isZero() &&
6168 isa
<ConstantInt
>(TrueVal
) && cast
<ConstantInt
>(TrueVal
)->isZero()) {
6169 const SCEV
*X
= getSCEV(LHS
);
6170 while (auto *ZExt
= dyn_cast
<SCEVZeroExtendExpr
>(X
))
6171 X
= ZExt
->getOperand();
6172 if (getTypeSizeInBits(X
->getType()) <= getTypeSizeInBits(Ty
)) {
6173 const SCEV
*FalseValExpr
= getSCEV(FalseVal
);
6174 if (SCEVMinMaxExprContains(FalseValExpr
, X
, scSequentialUMinExpr
))
6175 return getUMinExpr(getNoopOrZeroExtend(X
, Ty
), FalseValExpr
,
6176 /*Sequential=*/true);
6184 return std::nullopt
;
static std::optional<const SCEV *>
createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
                              const SCEV *TrueExpr, const SCEV *FalseExpr) {
  assert(CondExpr->getType()->isIntegerTy(1) &&
         TrueExpr->getType() == FalseExpr->getType() &&
         TrueExpr->getType()->isIntegerTy(1) &&
         "Unexpected operands of a select.");

  // i1 cond ? i1 x : i1 C  -->  C + (i1  cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq  cond, x - C)
  //
  // i1 cond ? i1 C : i1 x  -->  C + (i1  cond ? i1 0 : (i1 x - i1 C))
  //                        -->  C + (i1 ~cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq ~cond, x - C)
  //
  // FIXME: while we can't legally model the case where both of the hands
  // are fully variable, we only require that the *difference* is constant.
  if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
    return std::nullopt;

  const SCEV *X, *C;
  if (isa<SCEVConstant>(TrueExpr)) {
    CondExpr = SE->getNotSCEV(CondExpr);
    X = FalseExpr;
    C = TrueExpr;
  } else {
    X = TrueExpr;
    C = FalseExpr;
  }
  return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C),
                                           /*Sequential=*/true));
}
static std::optional<const SCEV *>
createNodeForSelectViaUMinSeq(ScalarEvolution *SE, Value *Cond, Value *TrueVal,
                              Value *FalseVal) {
  if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
    return std::nullopt;

  const auto *SECond = SE->getSCEV(Cond);
  const auto *SETrue = SE->getSCEV(TrueVal);
  const auto *SEFalse = SE->getSCEV(FalseVal);
  return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse);
}
const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
    Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) {
  assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?");
  assert(TrueVal->getType() == FalseVal->getType() &&
         V->getType() == TrueVal->getType() &&
         "Types of select hands and of the result must match.");

  // For now, only deal with i1-typed `select`s.
  if (!V->getType()->isIntegerTy(1))
    return getUnknown(V);

  if (std::optional<const SCEV *> S =
          createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal))
    return *S;

  return getUnknown(V);
}
const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
      if (std::optional<const SCEV *> S =
              createNodeForSelectOrPHIInstWithICmpInstCond(I->getType(), ICI,
                                                           TrueVal, FalseVal))
        return *S;
    }
  }

  return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal);
}
/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  assert(GEP->getSourceElementType()->isSized() &&
         "GEP source element type must be sized");

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Value *Index : GEP->indices())
    IndexExprs.push_back(getSCEV(Index));
  return getGEPExpr(GEP, IndexExprs);
}
APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
  uint64_t BitWidth = getTypeSizeInBits(S->getType());
  auto GetShiftedByZeros = [BitWidth](uint32_t TrailingZeros) {
    return TrailingZeros >= BitWidth
               ? APInt::getZero(BitWidth)
               : APInt::getOneBitSet(BitWidth, TrailingZeros);
  };
  auto GetGCDMultiple = [this](const SCEVNAryExpr *N) {
    // The result is GCD of all operands results.
    APInt Res = getConstantMultiple(N->getOperand(0));
    for (unsigned I = 1, E = N->getNumOperands(); I < E && Res != 1; ++I)
      Res = APIntOps::GreatestCommonDivisor(
          Res, getConstantMultiple(N->getOperand(I)));
    return Res;
  };

  switch (S->getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(S)->getAPInt();
  case scPtrToInt:
    return getConstantMultiple(cast<SCEVPtrToIntExpr>(S)->getOperand());
  case scUDivExpr:
  case scVScale:
    return APInt(BitWidth, 1);
  case scTruncate: {
    // Only multiples that are a power of 2 will hold after truncation.
    const SCEVTruncateExpr *T = cast<SCEVTruncateExpr>(S);
    uint32_t TZ = getMinTrailingZeros(T->getOperand());
    return GetShiftedByZeros(TZ);
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *Z = cast<SCEVZeroExtendExpr>(S);
    return getConstantMultiple(Z->getOperand()).zext(BitWidth);
  }
  case scSignExtend: {
    // Only multiples that are a power of 2 will hold after sext.
    const SCEVSignExtendExpr *E = cast<SCEVSignExtendExpr>(S);
    uint32_t TZ = getMinTrailingZeros(E->getOperand());
    return GetShiftedByZeros(TZ);
  }
  case scMulExpr: {
    const SCEVMulExpr *M = cast<SCEVMulExpr>(S);
    if (M->hasNoUnsignedWrap()) {
      // The result is the product of all operand results.
      APInt Res = getConstantMultiple(M->getOperand(0));
      for (const SCEV *Operand : M->operands().drop_front())
        Res = Res * getConstantMultiple(Operand);
      return Res;
    }

    // If there are no wrap guarantees, find the trailing zeros, which is the
    // sum of trailing zeros for all its operands.
    uint32_t TZ = 0;
    for (const SCEV *Operand : M->operands())
      TZ += getMinTrailingZeros(Operand);
    return GetShiftedByZeros(TZ);
  }
  case scAddExpr:
  case scAddRecExpr: {
    const SCEVNAryExpr *N = cast<SCEVNAryExpr>(S);
    if (N->hasNoUnsignedWrap())
      return GetGCDMultiple(N);
    // Find the trailing zero bits, which is the minimum over its operands.
    uint32_t TZ = getMinTrailingZeros(N->getOperand(0));
    for (const SCEV *Operand : N->operands().drop_front())
      TZ = std::min(TZ, getMinTrailingZeros(Operand));
    return GetShiftedByZeros(TZ);
  }
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr:
    return GetGCDMultiple(cast<SCEVNAryExpr>(S));
  case scUnknown: {
    // Ask ValueTracking for known bits.
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    unsigned Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT)
            .countMinTrailingZeros();
    return GetShiftedByZeros(Known);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
APInt ScalarEvolution::getConstantMultiple(const SCEV *S) {
  auto I = ConstantMultipleCache.find(S);
  if (I != ConstantMultipleCache.end())
    return I->second;

  APInt Result = getConstantMultipleImpl(S);
  auto InsertPair = ConstantMultipleCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}
APInt ScalarEvolution::getNonZeroConstantMultiple(const SCEV *S) {
  APInt Multiple = getConstantMultiple(S);
  return Multiple == 0 ? APInt(Multiple.getBitWidth(), 1) : Multiple;
}

uint32_t ScalarEvolution::getMinTrailingZeros(const SCEV *S) {
  return std::min(getConstantMultiple(S).countTrailingZeros(),
                  (unsigned)getTypeSizeInBits(S->getType()));
}
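
// E.g. a SCEV whose constant multiple is 8 has at least three known trailing
// zero bits. A constant multiple of 0 (a value known to be zero) would report
// more trailing zeros than the type is wide, hence the clamp to the type size.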
/// Helper method to assign a range to V from metadata present in the IR.
static std::optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);
    if (const auto *CB = dyn_cast<CallBase>(V))
      if (std::optional<ConstantRange> Range = CB->getRange())
        return Range;
  }
  if (auto *A = dyn_cast<Argument>(V))
    if (std::optional<ConstantRange> Range = A->getRange())
      return Range;

  return std::nullopt;
}
void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
    ConstantMultipleCache.erase(AddRec);
  }
}
ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range. Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different from
  // the one used by AddRec (and thus most of this file). Step is allowed to
  // be arbitrarily loop varying here, where AddRec allows only loop invariant
  // and other addrecs in the same loop (for non-affine addrecs). The code
  // below intentionally handles the case where step is not loop invariant.
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return FullSet;

  // Make sure that no Phi input comes from an unreachable block. Otherwise,
  // even the values that are not available in these blocks may come from them,
  // and this leads to a false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return FullSet;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return FullSet;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information in the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bail out
    // until the caller issue can be fixed. PR49566 tracks the bug.
    return FullSet;

  // TODO: Extend to other opcodes such as mul, and div
  switch (BO->getOpcode()) {
  default:
    return FullSet;
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
    break;
  }

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
    return FullSet;

  unsigned TC = getSmallConstantMaxTripCount(L);
  if (!TC || TC >= BitWidth)
    return FullSet;

  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
  assert(KnownStart.getBitWidth() == BitWidth &&
         KnownStep.getBitWidth() == BitWidth);

  // Compute total shift amount, being careful of overflow and bitwidths.
  auto MaxShiftAmt = KnownStep.getMaxValue();
  APInt TCAP(BitWidth, TC - 1);
  bool Overflow = false;
  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
  if (Overflow)
    return FullSet;

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("filtered out above");
  case Instruction::AShr: {
    // For each ashr, three cases:
    //   shift = 0 => unchanged value
    //   saturation => 0 or -1
    //   other => a value closer to zero (of the same sign)
    // Thus, the end value is closer to zero than the start.
    auto KnownEnd = KnownBits::ashr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    if (KnownStart.isNonNegative())
      // Analogous to lshr (simply not yet canonicalized)
      return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                        KnownStart.getMaxValue() + 1);
    if (KnownStart.isNegative())
      // End >=u Start && End <=s Start
      return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
                                        KnownEnd.getMaxValue() + 1);
    break;
  }
  case Instruction::LShr: {
    // For each lshr, three cases:
    //   shift = 0 => unchanged value
    //   saturation => 0
    //   other => a smaller positive number
    // Thus, the low end of the unsigned range is the last value produced.
    auto KnownEnd = KnownBits::lshr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                      KnownStart.getMaxValue() + 1);
  }
  case Instruction::Shl: {
    // Iff no bits are shifted out, value increases on every shift.
    auto KnownEnd = KnownBits::shl(KnownStart,
                                   KnownBits::makeConstant(TotalShift));
    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
      return ConstantRange(KnownStart.getMinValue(),
                           KnownEnd.getMaxValue() + 1);
    break;
  }
  }
  return FullSet;
}
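
// An illustrative recurrence this routine can bound (example IR, not from
// the original source):
//   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.next = lshr i32 %iv, 1
// Given a known max trip count, the unsigned value only shrinks, so the
// result lies between the smallest possible final value and the largest
// possible starting value.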
const ConstantRange &
ScalarEvolution::getRangeRefIter(const SCEV *S,
                                 ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  SmallVector<const SCEV *> WorkList;
  SmallPtrSet<const SCEV *, 8> Seen;

  // Add Expr to the worklist, if Expr is either an N-ary expression or a
  // SCEVUnknown PHI node.
  auto AddToWorklist = [&WorkList, &Seen, &Cache](const SCEV *Expr) {
    if (!Seen.insert(Expr).second)
      return;
    if (Cache.contains(Expr))
      return;
    switch (Expr->getSCEVType()) {
    case scUnknown:
      if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
        break;
      [[fallthrough]];
    case scConstant:
    case scVScale:
    case scTruncate:
    case scZeroExtend:
    case scSignExtend:
    case scPtrToInt:
    case scAddExpr:
    case scMulExpr:
    case scUDivExpr:
    case scAddRecExpr:
    case scUMaxExpr:
    case scSMaxExpr:
    case scUMinExpr:
    case scSMinExpr:
    case scSequentialUMinExpr:
      WorkList.push_back(Expr);
      break;
    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
  };
  AddToWorklist(S);

  // Build worklist by queuing operands of N-ary expressions and phi nodes.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    const SCEV *P = WorkList[I];
    auto *UnknownS = dyn_cast<SCEVUnknown>(P);
    // If it is not a `SCEVUnknown`, just recurse into operands.
    if (!UnknownS) {
      for (const SCEV *Op : P->operands())
        AddToWorklist(Op);
      continue;
    }
    // `SCEVUnknown`'s require special treatment.
    if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue())) {
      if (!PendingPhiRangesIter.insert(P).second)
        continue;
      for (auto &Op : reverse(P->operands()))
        AddToWorklist(getSCEV(Op));
    }
  }

  if (!WorkList.empty()) {
    // Use getRangeRef to compute ranges for items in the worklist in reverse
    // order. This will force ranges for earlier operands to be computed before
    // their users in most cases.
    for (const SCEV *P : reverse(drop_begin(WorkList))) {
      getRangeRef(P, SignHint);

      if (auto *UnknownS = dyn_cast<SCEVUnknown>(P))
        if (const PHINode *P = dyn_cast<PHINode>(UnknownS->getValue()))
          PendingPhiRangesIter.erase(P);
    }
  }

  return getRangeRef(S, SignHint, 0);
}
/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &ScalarEvolution::getRangeRef(
    const SCEV *S, ScalarEvolution::RangeSignHint SignHint, unsigned Depth) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? ConstantRange::Unsigned
                                                       : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  // Switch to iteratively computing the range for S, if it is part of a deeply
  // nested expression.
  if (Depth > RangeIterThreshold)
    return getRangeRefIter(S, SignHint);

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
  if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
    APInt Multiple = getNonZeroConstantMultiple(S);
    APInt Remainder = APInt::getMaxValue(BitWidth).urem(Multiple);
    if (!Remainder.isZero())
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth) - Remainder + 1);
  } else {
    uint32_t TZ = getMinTrailingZeros(S);
    if (TZ != 0) {
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
    }
  }
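
  // Worked example for the clamp above (illustrative): for an i8 value known
  // to be a multiple of 4, 255 urem 4 == 3, so the unsigned range is clamped
  // to [0, 253); its largest member, 252, is the greatest multiple of 4
  // representable in 8 bits.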
  switch (S->getSCEVType()) {
  case scConstant:
    llvm_unreachable("Already handled above.");
  case scVScale:
    return setRange(S, SignHint, getVScaleRange(&F, BitWidth));
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(S);
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint, Depth + 1);
    return setRange(
        Trunc, SignHint,
        ConservativeResult.intersectWith(X.truncate(BitWidth), RangeType));
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(S);
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        ZExt, SignHint,
        ConservativeResult.intersectWith(X.zeroExtend(BitWidth), RangeType));
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(S);
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        SExt, SignHint,
        ConservativeResult.intersectWith(X.signExtend(BitWidth), RangeType));
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(S);
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint, Depth + 1);
    return setRange(PtrToInt, SignHint, X);
  }
  case scAddExpr: {
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint, Depth + 1);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (const SCEV *Op : drop_begin(Add->operands()))
      X = X.addWithNoWrap(getRangeRef(Op, SignHint, Depth + 1), WrapType,
                          RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scMulExpr: {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(S);
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint, Depth + 1);
    for (const SCEV *Op : drop_begin(Mul->operands()))
      X = X.multiply(getRangeRef(Op, SignHint, Depth + 1));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint, Depth + 1);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint, Depth + 1);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(S);
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except initial value have
    // the same sign or zero, the value won't ever be:
    // 1: smaller than initial value if operands are non negative,
    // 2: bigger than initial value if operands are non positive.
    // For both cases, value can not cross signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
                                       getSignedRangeMax(AddRec->getStart()) +
                                           1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBEScev =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
        APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();

        // Adjust MaxBECount to the same bitwidth as AddRec. We can truncate if
        // MaxBECount's active bits are all <= AddRec's bit width.
        if (MaxBECount.getBitWidth() > BitWidth &&
            MaxBECount.getActiveBits() <= BitWidth)
          MaxBECount = MaxBECount.trunc(BitWidth);
        else if (MaxBECount.getBitWidth() < BitWidth)
          MaxBECount = MaxBECount.zext(BitWidth);

        if (MaxBECount.getBitWidth() == BitWidth) {
          auto RangeFromAffine = getRangeForAffineAR(
              AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

          auto RangeFromFactoring = getRangeViaFactoring(
              AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
        }
      }

      // Now try symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBEScev->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }
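
  // Illustrative instance of the affine path above: for {0,+,1} in a loop
  // whose constant max backedge-taken count is 9, getRangeForAffineAR yields
  // [0, 10), since the recurrence only takes the values 0, 1, ..., 9.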
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    Intrinsic::ID ID;
    switch (S->getSCEVType()) {
    case scUMaxExpr:
      ID = Intrinsic::umax;
      break;
    case scSMaxExpr:
      ID = Intrinsic::smax;
      break;
    case scUMinExpr:
    case scSequentialUMinExpr:
      ID = Intrinsic::umin;
      break;
    case scSMinExpr:
      ID = Intrinsic::smin;
      break;
    default:
      llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr.");
    }

    const auto *NAry = cast<SCEVNAryExpr>(S);
    ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint, Depth + 1);
    for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
      X = ConstantRange::intrinsic(
          ID, {X, getRangeRef(NAry->getOperand(i), SignHint, Depth + 1)});
    return setRange(S, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    Value *V = U->getValue();

    // Check if the IR explicitly contains !range metadata.
    std::optional<ConstantRange> MDRange = GetRangeFromMetadata(V);
    if (MDRange)
      ConservativeResult =
          ConservativeResult.intersectWith(*MDRange, RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(V, DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    if (U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
      // Strengthen the range if the underlying IR value is a
      // global/alloca/heap allocation using the size of the object.
      bool CanBeNull, CanBeFreed;
      uint64_t DerefBytes =
          V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
      if (DerefBytes > 1 && isUIntN(BitWidth, DerefBytes)) {
        // The highest address the object can start is DerefBytes bytes before
        // the end (unsigned max value). If this value is not a multiple of the
        // alignment, the last possible start value is the next lowest multiple
        // of the alignment. Note: The computations below cannot overflow,
        // because if they did, there would be no possible start address for
        // the object.
        APInt MaxVal =
            APInt::getMaxValue(BitWidth) - APInt(BitWidth, DerefBytes);
        uint64_t Align = U->getValue()->getPointerAlignment(DL).value();
        uint64_t Rem = MaxVal.urem(Align);
        MaxVal -= APInt(BitWidth, Rem);
        APInt MinVal = APInt::getZero(BitWidth);
        if (llvm::isKnownNonZero(V, DL))
          MinVal = Align;
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
      }
    }

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (PHINode *Phi = dyn_cast<PHINode>(V)) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);

        for (const auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint, Depth + 1);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    // vscale can't be equal to zero
    if (const auto *II = dyn_cast<IntrinsicInst>(V))
      if (II->getIntrinsicID() == Intrinsic::vscale) {
        ConstantRange Disallowed = APInt::getZero(BitWidth);
        ConservativeResult = ConservativeResult.difference(Disallowed);
      }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. The
// Signed argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               bool Signed) {
  unsigned BitWidth = Step.getBitWidth();
  assert(BitWidth == StartRange.getBitWidth() &&
         BitWidth == MaxBECount.getBitWidth() && "mismatched bit widths");
  // If either Step or MaxBECount is 0, then the expression won't change, and we
  // just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we also
  // note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // This equation holds true due to the well-defined wrap-around behavior
    // of APInt arithmetic.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of StartRange
  // if the expression is increasing and will be decreased by Offset otherwise.
  // Maximum value of the final range will match the maximal value of StartRange
  // if the expression is decreasing and will be increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the initial
  // range (due to wrap around). This means that the expression can take any
  // value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [NewLower, NewUpper) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}
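
// Worked example (values chosen for illustration): Step = 2 treated as
// unsigned, StartRange = [10, 21), MaxBECount = 5. Offset = 2 * 5 = 10, and
// the moved boundary 20 + 10 = 30 does not fall back inside [10, 21), so the
// result is [10, 31).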
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const APInt &MaxBECount) {
  assert(getTypeSizeInBits(Start->getType()) ==
             getTypeSizeInBits(Step->getType()) &&
         getTypeSizeInBits(Start->getType()) == MaxBECount.getBitWidth() &&
         "mismatched bit widths");

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR = getRangeForAffineARHelper(
      StepSRange.getSignedMin(), StartSRange, MaxBECount, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECount,
                                              /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start), MaxBECount,
      /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}
ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which we
  // do not know max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1:   RangeMin    ...    Start V1 ... VN End ...           RangeMax;
  // Case 2:   RangeMin Vk ... V1 Start    ...    End Vn ... Vk + 1 RangeMax;
  //
  // No self wrap flag guarantees that the intermediate values cannot be BOTH
  // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
  // knowledge, let's try to prove that we are dealing with Case 1. It is so if
  // Start <= End and step is positive, or Start >= End and step is negative.
  const SCEV *Start = applyLoopGuards(AddRec->getStart(), AddRec->getLoop());
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover full iteration space, we will know nothing useful
  // even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  if (isKnownNegative(Step) &&
      isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}
ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const APInt &MaxBECount) {
  //    RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  // == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  unsigned BitWidth = MaxBECount.getBitWidth();
  assert(getTypeSizeInBits(Start->getType()) == BitWidth &&
         getTypeSizeInBits(Step->getType()) == BitWidth &&
         "mismatched bit widths");

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      std::optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp)
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases where
    // that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);

  return TrueRange.unionWith(FalseRange);
}
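
// Illustrative use of the identity above: with Start = %c ? 0 : 100 and
// Step = %c ? 1 : -1, the result is the union of the ranges of {0,+,1} and
// {100,+,-1}, instead of giving up on the select entirely.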
SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}
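
// E.g. for "%a = add nuw nsw i32 %x, %y" this returns FlagNUW | FlagNSW, but
// only when isSCEVExprNeverPoison shows the flags hold for every instruction
// mapping to the same SCEV; otherwise it degrades to FlagAnyWrap.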
const Instruction *
ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
    return &*AddRec->getLoop()->getHeader()->begin();
  if (auto *U = dyn_cast<SCEVUnknown>(S))
    if (auto *I = dyn_cast<Instruction>(U->getValue()))
      return I;
  return nullptr;
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
                                       bool &Precise) {
  Precise = true;
  // Do a bounded search of the def relation of the requested SCEVs.
  SmallSet<const SCEV *, 16> Visited;
  SmallVector<const SCEV *> Worklist;
  auto pushOp = [&](const SCEV *S) {
    if (!Visited.insert(S).second)
      return;
    // Threshold of 30 here is arbitrary.
    if (Visited.size() > 30) {
      Precise = false;
      return;
    }
    Worklist.push_back(S);
  };

  for (const auto *S : Ops)
    pushOp(S);

  const Instruction *Bound = nullptr;
  while (!Worklist.empty()) {
    auto *S = Worklist.pop_back_val();
    if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
      if (!Bound || DT.dominates(Bound, DefI))
        Bound = DefI;
    } else {
      for (const auto *Op : S->operands())
        pushOp(Op);
    }
  }
  return Bound ? Bound : &*F.getEntryBlock().begin();
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
  bool Discard;
  return getDefiningScopeBound(Ops, Discard);
}
bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
                                                        const Instruction *B) {
  if (A->getParent() == B->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 B->getIterator()))
    return true;

  auto *BLoop = LI.getLoopFor(B->getParent());
  if (BLoop && BLoop->getHeader() == B->getParent() &&
      BLoop->getLoopPreheader() == A->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 A->getParent()->end()) &&
      isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
                                                 B->getIterator()))
    return true;
  return false;
}

bool ScalarEvolution::isGuaranteedNotToBePoison(const SCEV *Op) {
  SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ true);
  visitAll(Op, PC);
  return PC.MaybePoison.empty();
}
bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find an
  // upper bound on the defining scope for the SCEV, and prove that I is
  // executed every time we enter that scope. When the bounding scope is a
  // loop (the common case), this is equivalent to proving I executes on every
  // iteration of that loop.
  SmallVector<const SCEV *> SCEVOps;
  for (const Use &Op : I->operands()) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (isSCEVable(Op->getType()))
      SCEVOps.push_back(getSCEV(Op));
  }
  auto *DefI = getDefiningScopeBound(SCEVOps);
  return isGuaranteedToTransferExecutionTo(DefI, I);
}
bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // If the loop only has one exit, then we know that, if the loop is entered,
  // any instruction dominating that exit will be executed. If any such
  // instruction would result in UB, the addrec cannot be poison.
  //
  // This is basically the same reasoning as in isSCEVExprNeverPoison(), but
  // also handles uses outside the loop header (they just need to dominate the
  // single exit).

  auto *ExitingBB = L->getExitingBlock();
  if (!ExitingBB || !loopHasNoAbnormalExits(L))
    return false;

  SmallPtrSet<const Value *, 16> KnownPoison;
  SmallVector<const Instruction *, 8> Worklist;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // Worklist.
  KnownPoison.insert(I);
  Worklist.push_back(I);

  while (!Worklist.empty()) {
    const Instruction *Poison = Worklist.pop_back_val();

    for (const Use &U : Poison->uses()) {
      const Instruction *PoisonUser = cast<Instruction>(U.getUser());
      if (mustTriggerUB(PoisonUser, KnownPoison) &&
          DT.dominates(PoisonUser->getParent(), ExitingBB))
        return true;

      if (propagatesPoison(U) && L->contains(PoisonUser))
        if (KnownPoison.insert(PoisonUser).second)
          Worklist.push_back(PoisonUser);
    }
  }

  return false;
}
ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /* HasNoSideEffects */ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative. It's only *specific*
  // side effects which are well defined in infinite loops.
  return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
}
const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
  // Worklist item with a Value and a bool indicating whether all operands have
  // been visited already.
  using PointerTy = PointerIntPair<Value *, 1, bool>;
  SmallVector<PointerTy> Stack;

  Stack.emplace_back(V, true);
  Stack.emplace_back(V, false);
  while (!Stack.empty()) {
    auto E = Stack.pop_back_val();
    Value *CurV = E.getPointer();

    if (getExistingSCEV(CurV))
      continue;

    SmallVector<Value *> Ops;
    const SCEV *CreatedSCEV = nullptr;
    // If all operands have been visited already, create the SCEV.
    if (E.getInt()) {
      CreatedSCEV = createSCEV(CurV);
    } else {
      // Otherwise get the operands we need to create SCEV's for before creating
      // the SCEV for CurV. If the SCEV for CurV can be constructed trivially,
      // just use it.
      CreatedSCEV = getOperandsToCreate(CurV, Ops);
    }

    if (CreatedSCEV) {
      insertValueToMap(CurV, CreatedSCEV);
    } else {
      // Queue CurV for SCEV creation, followed by its operands which need to
      // be constructed first.
      Stack.emplace_back(CurV, true);
      for (Value *Op : Ops)
        Stack.emplace_back(Op, false);
    }
  }

  return getExistingSCEV(V);
}
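
// Illustrative trace (hypothetical IR): for %a = add i32 %x, %y with nothing
// cached, the stack evolves roughly as
//   [(a,true),(a,false)] -> [(a,true),(a,true),(y,false),(x,false)]
// so SCEVs for the operands exist before createSCEV(%a) runs.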
const SCEV *
ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(PoisonValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<GlobalAlias>(V))
    return getUnknown(V);
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO =
          MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) {
    bool IsConstArg = isa<ConstantInt>(BO->RHS);
    switch (BO->Opcode) {
    case Instruction::Add:
    case Instruction::Mul: {
      // For additions and multiplications, traverse add/mul chains for which we
      // can potentially create a single SCEV, to reduce the number of
      // get{Add,Mul}Expr calls.
      do {
        if (BO->Op != V && getExistingSCEV(BO->Op)) {
          Ops.push_back(BO->Op);
          break;
        }

        Ops.push_back(BO->RHS);
        auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
                                   dyn_cast<Instruction>(V));
        if (!NewBO ||
            (BO->Opcode == Instruction::Add &&
             (NewBO->Opcode != Instruction::Add &&
              NewBO->Opcode != Instruction::Sub)) ||
            (BO->Opcode == Instruction::Mul &&
             NewBO->Opcode != Instruction::Mul)) {
          Ops.push_back(BO->LHS);
          break;
        }

        // CreateSCEV calls getNoWrapFlagsFromUB, which under certain conditions
        // requires a SCEV for the LHS.
        if (BO->Op && (BO->IsNSW || BO->IsNUW)) {
          auto *I = dyn_cast<Instruction>(BO->Op);
          if (I && programUndefinedIfPoison(I)) {
            Ops.push_back(BO->LHS);
            break;
          }
        }
        BO = NewBO;
      } while (true);
      return nullptr;
    }
    case Instruction::Sub:
    case Instruction::UDiv:
    case Instruction::URem:
      break;
    case Instruction::AShr:
    case Instruction::Shl:
    case Instruction::Xor:
      if (!IsConstArg)
        return getUnknown(V);
      break;
    case Instruction::And:
    case Instruction::Or:
      if (!IsConstArg && !BO->LHS->getType()->isIntegerTy(1))
        return getUnknown(V);
      break;
    case Instruction::LShr:
      return getUnknown(V);
    default:
      llvm_unreachable("Unhandled binop");
    }

    Ops.push_back(BO->LHS);
    Ops.push_back(BO->RHS);
    return nullptr;
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::PtrToInt:
    Ops.push_back(U->getOperand(0));
    break;

  case Instruction::BitCast:
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) {
      Ops.push_back(U->getOperand(0));
      break;
    }
    return getUnknown(V);

  case Instruction::SDiv:
  case Instruction::SRem:
    Ops.push_back(U->getOperand(0));
    Ops.push_back(U->getOperand(1));
    break;

  case Instruction::GetElementPtr:
    assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
           "GEP source element type must be sized");
    for (Value *Index : U->operands())
      Ops.push_back(Index);
    break;

  case Instruction::IntToPtr:
    return getUnknown(V);

  case Instruction::PHI:
    // Keep constructing SCEVs for phis recursively for now.
    break;

  case Instruction::Select: {
    // Check if U is a select that can be simplified to a SCEVUnknown.
    auto CanSimplifyToUnknown = [this, U]() {
      if (U->getType()->isIntegerTy(1) || isa<ConstantInt>(U->getOperand(0)))
        return false;

      auto *ICI = dyn_cast<ICmpInst>(U->getOperand(0));
      if (!ICI)
        return false;
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);
      if (ICI->getPredicate() == CmpInst::ICMP_EQ ||
          ICI->getPredicate() == CmpInst::ICMP_NE) {
        if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()))
          return true;
      } else if (getTypeSizeInBits(LHS->getType()) >
                 getTypeSizeInBits(U->getType()))
        return true;
      return false;
    };
    if (CanSimplifyToUnknown())
      return getUnknown(U);

    for (Value *Inc : U->operands())
      Ops.push_back(Inc);
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) {
      Ops.push_back(RV);
      break;
    }

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        Ops.push_back(II->getArgOperand(0));
        break;
      case Intrinsic::umax:
      case Intrinsic::umin:
      case Intrinsic::smax:
      case Intrinsic::smin:
      case Intrinsic::usub_sat:
      case Intrinsic::uadd_sat:
        Ops.push_back(II->getArgOperand(0));
        Ops.push_back(II->getArgOperand(1));
        break;
      case Intrinsic::start_loop_iterations:
      case Intrinsic::annotation:
      case Intrinsic::ptr_annotation:
        Ops.push_back(II->getArgOperand(0));
        break;
      default:
        break;
      }
    }
    break;
  default:
    break;
  }

  return nullptr;
}
const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(PoisonValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<GlobalAlias>(V))
    return getUnknown(V);
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  const SCEV *LHS;
  const SCEV *RHS;

  Operator *U = cast<Operator>(V);
  if (auto BO =
          MatchBinaryOp(U, getDataLayout(), AC, DT, dyn_cast<Instruction>(V))) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
                                   dyn_cast<Instruction>(V));
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }
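
    // Illustrative effect of the chain walk above: ((%x + %y) + %z) gathers
    // %x, %y and %z into one getAddExpr call rather than two nested ones.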
    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            LHS = getSCEV(BO->LHS);
            RHS = getSCEV(BO->RHS);
            MulOps.push_back(getMulExpr(LHS, RHS, Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
                                   dyn_cast<Instruction>(V));
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      LHS = getSCEV(BO->LHS);
      RHS = getSCEV(BO->RHS);
      return getUDivExpr(LHS, RHS);
    case Instruction::URem:
      LHS = getSCEV(BO->LHS);
      RHS = getSCEV(BO->RHS);
      return getURemExpr(LHS, RHS);
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      LHS = getSCEV(BO->LHS);
      RHS = getSCEV(BO->RHS);
      return getMinusSCEV(LHS, RHS, Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countl_zero();
        unsigned TZ = A.countr_zero();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countr_zero();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV *, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              append_range(MulOps, LHSMul->operands().drop_front());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                      IntegerType::get(getContext(), BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      // Binary `and` is a bit-wise `umin`.
      if (BO->LHS->getType()->isIntegerTy(1)) {
        LHS = getSCEV(BO->LHS);
        RHS = getSCEV(BO->RHS);
        return getUMinExpr(LHS, RHS);
      }
      break;
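
    // Illustrative instance of the mask handling above: on i8, (%x & 0xF0)
    // has LZ = 0 and TZ = 4 and is modeled as 16 * zext(trunc(%x /u 16) to
    // i4), re-extended to i8 (values chosen for illustration).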
    case Instruction::Or:
      // Binary `or` is a bit-wise `umax`.
      if (BO->LHS->getType()->isIntegerTy(1)) {
        LHS = getSCEV(BO->LHS);
        RHS = getSCEV(BO->RHS);
        return getUMaxExpr(LHS, RHS);
      }
      break;
    case Instruction::Xor:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        // If the RHS of xor is -1, then this is a not operation.
        if (CI->isMinusOne())
          return getNotSCEV(getSCEV(BO->LHS));

        // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
        // This is a variant of the check for xor with -1, and it handles
        // the case where instcombine has trimmed non-demanded bits out
        // of an xor with -1.
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits. Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;
    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        ConstantInt *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags);
      }
      break;
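
    // E.g. "%s = shl nsw i32 %x, 3" becomes (%x * 8); because the shift
    // amount 3 is less than BitWidth - 1, the nsw flag may carry over to the
    // multiply whenever getNoWrapFlagsFromUB allows it (example illustrative).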
    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      const SCEV *AddTruncateExpr = nullptr;
      ConstantInt *ShlAmtCI = nullptr;
      const SCEV *AddConstant = nullptr;

      if (L && L->getOpcode() == Instruction::Add) {
        // X = Shl A, n
        // Y = Add X, c
        // Z = AShr Y, m
        // n, c and m are constants.

        Operator *LShift = dyn_cast<Operator>(L->getOperand(0));
        ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (LShift && LShift->getOpcode() == Instruction::Shl) {
          if (AddOperandCI) {
            const SCEV *ShlOp0SCEV = getSCEV(LShift->getOperand(0));
            ShlAmtCI = dyn_cast<ConstantInt>(LShift->getOperand(1));
            // Since we truncate to TruncTy, the AddConstant should be of the
            // same type, so create a new constant with the same type as
            // TruncTy. Also, the Add constant should be shifted right by the
            // AShr amount.
            APInt AddOperand = AddOperandCI->getValue().ashr(AShrAmt);
            AddConstant = getConstant(AddOperand.trunc(BitWidth - AShrAmt));
            // We model the expression as sext(add(trunc(A), c << n)). Since
            // the sext(trunc) part is already handled below, we create an
            // AddExpr(TruncExpr) which will be used later.
            AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
          }
        }
      } else if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
      }

      if (AddTruncateExpr && ShlAmtCI) {
        // We can merge the two given cases into a single SCEV statement. In
        // case n = m, the mul expression will be 2^0, so it gets resolved to
        // a simpler case. The following code handles the two cases:
        //
        // 1) For a two-shift sext-inreg, i.e. n = m,
        //    use sext(trunc(x)) as the SCEV expression.
        //
        // 2) When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
        //    expression. We already checked that ShlAmt < BitWidth, so
        //    the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
        //    ShlAmt - AShrAmt < BitWidth - AShrAmt.
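        //
        // For instance, "ashr (shl i32 %x, 24), 24" is a sext-inreg and
        // becomes (sext i8 (trunc i32 %x to i8) to i32); with n = 28, m = 24
        // it becomes (sext i8 (16 * (trunc i32 %x to i8)) to i32).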
        const APInt &ShlAmt = ShlAmtCI->getValue();
        if (ShlAmt.ult(BitWidth) && ShlAmt.uge(AShrAmt)) {
          APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                          ShlAmtCI->getZExtValue() - AShrAmt);
          const SCEV *CompositeExpr =
              getMulExpr(AddTruncateExpr, getConstant(Mul));
          if (L->getOpcode() != Instruction::Shl)
            CompositeExpr = getAddExpr(CompositeExpr, AddConstant);

          return getSignExtendExpr(CompositeExpr, OuterTy);
        }
      }
      break;
    }
    }
  }
  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), getDataLayout(), AC, DT,
                                dyn_cast<Instruction>(V))) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands, we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
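      //
      // For example, sext((A - B)<nsw>) is rebuilt below as
      // (sext(A) - sext(B))<nsw>, keeping both operands analyzable.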
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;
  case Instruction::PtrToInt: {
    // Pointer to integer cast is straight-forward, so do model it.
    const SCEV *Op = getSCEV(U->getOperand(0));
    Type *DstIntTy = U->getType();
    // But only if the effective SCEV (integer) type is wide enough to
    // represent all possible pointer values.
    const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
    if (isa<SCEVCouldNotCompute>(IntOp))
      return getUnknown(V);
    return IntOp;
  }
  case Instruction::IntToPtr:
    // Just don't deal with inttoptr casts.
    return getUnknown(V);
  case Instruction::SDiv:
    // If both operands are non-negative, this is just a udiv.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::SRem:
    // If both operands are non-negative, this is just a urem.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;
  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1),
                                    U->getOperand(2));

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        return getAbsExpr(
            getSCEV(II->getArgOperand(0)),
            /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
      case Intrinsic::umax:
        LHS = getSCEV(II->getArgOperand(0));
        RHS = getSCEV(II->getArgOperand(1));
        return getUMaxExpr(LHS, RHS);
      case Intrinsic::umin:
        LHS = getSCEV(II->getArgOperand(0));
        RHS = getSCEV(II->getArgOperand(1));
        return getUMinExpr(LHS, RHS);
      case Intrinsic::smax:
        LHS = getSCEV(II->getArgOperand(0));
        RHS = getSCEV(II->getArgOperand(1));
        return getSMaxExpr(LHS, RHS);
      case Intrinsic::smin:
        LHS = getSCEV(II->getArgOperand(0));
        RHS = getSCEV(II->getArgOperand(1));
        return getSMinExpr(LHS, RHS);
      case Intrinsic::usub_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedY = getUMinExpr(X, Y);
        return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
      }
      case Intrinsic::uadd_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
        return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
      }
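      // Note: the two saturating cases above rely on the identities
      //   usub.sat(X, Y) == X - umin(X, Y)  and
      //   uadd.sat(X, Y) == umin(X, ~Y) + Y,
      // whose right-hand sides can never wrap unsigned, hence the NUW flags.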
      case Intrinsic::start_loop_iterations:
      case Intrinsic::annotation:
      case Intrinsic::ptr_annotation:
        // A start_loop_iterations or llvm.annotation or llvm.ptr.annotation is
        // just equivalent to the first operand for SCEV purposes.
        return getSCEV(II->getArgOperand(0));
      case Intrinsic::vscale:
        return getVScale(II->getType());
      default:
        break;
      }
    }
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
  if (isa<SCEVCouldNotCompute>(ExitCount))
    return getCouldNotCompute();

  auto *ExitCountType = ExitCount->getType();
  assert(ExitCountType->isIntegerTy());
  auto *EvalTy = Type::getIntNTy(ExitCountType->getContext(),
                                 1 + ExitCountType->getScalarSizeInBits());
  return getTripCountFromExitCount(ExitCount, EvalTy, nullptr);
}

const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
                                                       Type *EvalTy,
                                                       const Loop *L) {
  if (isa<SCEVCouldNotCompute>(ExitCount))
    return getCouldNotCompute();

  unsigned ExitCountSize = getTypeSizeInBits(ExitCount->getType());
  unsigned EvalSize = EvalTy->getPrimitiveSizeInBits();

  auto CanAddOneWithoutOverflow = [&]() {
    ConstantRange ExitCountRange =
        getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
    if (!ExitCountRange.contains(APInt::getMaxValue(ExitCountSize)))
      return true;

    return L && isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, ExitCount,
                                         getMinusOne(ExitCount->getType()));
  };

  // If we need to zero extend the backedge count, check if we can add one to
  // it prior to zero extending without overflow. Provided this is safe, it
  // allows better simplification of the +1.
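  // For example, an i8 exit count whose unsigned range excludes 255 can be
  // incremented first and extended afterwards: zext(EC + 1) == zext(EC) + 1.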
  if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
    return getZeroExtendExpr(
        getAddExpr(ExitCount, getOne(ExitCount->getType())), EvalTy);

  // Get the total trip count from the count by adding 1. This may wrap.
  return getAddExpr(getTruncateOrZeroExtend(ExitCount, EvalTy), getOne(EvalTy));
}

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
  return getConstantTripCount(ExitCount);
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(
    const Loop *L, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const auto *MaxExitCount =
      Predicates ? getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
                 : getConstantMaxBackedgeTakenCount(L);
  return getConstantTripCount(dyn_cast<SCEVConstant>(MaxExitCount));
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  std::optional<unsigned> Res;
  for (auto *ExitingBB : ExitingBlocks) {
    unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
    if (!Res)
      Res = Multiple;
    Res = (unsigned)std::gcd(*Res, Multiple);
  }
  return Res.value_or(1);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                                       const SCEV *ExitCount) {
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count.
  const SCEV *TCExpr = getTripCountFromExitCount(applyLoopGuards(ExitCount, L));

  APInt Multiple = getNonZeroConstantMultiple(TCExpr);
  // If a trip multiple is huge (>= 2^32), the trip count is still divisible
  // by the greatest power of 2 divisor less than 2^32.
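  // For example, a known multiple of 3 * 2^40 still guarantees divisibility
  // by 2^31, the largest power of two that fits the 32-bit result.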
  return Multiple.getActiveBits() > 32
             ? 1U << std::min((unsigned)31, Multiple.countTrailingZeros())
             : (unsigned)Multiple.zextOrTrunc(32).getZExtValue();
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  return getSmallConstantTripMultiple(L, ExitCount);
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *ScalarEvolution::getPredicatedExitCount(
    const Loop *L, const BasicBlock *ExitingBlock,
    SmallVectorImpl<const SCEVPredicate *> *Predicates, ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock, this,
                                                      Predicates);
  case SymbolicMaximum:
    return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this,
                                                            Predicates);
  case ConstantMaximum:
    return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this,
                                                            Predicates);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *ScalarEvolution::getPredicatedBackedgeTakenCount(
    const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *ScalarEvolution::getPredicatedSymbolicMaxBackedgeTakenCount(
    const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L, this, &Preds);
}

const SCEV *ScalarEvolution::getPredicatedConstantMaxBackedgeTakenCount(
    const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getConstantMax(this, &Preds);
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void PushLoopPHIs(const Loop *L,
                         SmallVectorImpl<Instruction *> &Worklist,
                         SmallPtrSetImpl<Instruction *> &Visited) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    if (Visited.insert(&PN).second)
      Worklist.push_back(&PN);
}

ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This invalidation is not necessary for correctness, and is
  // only done to produce more precise results.
  if (Result.hasAnyInfo()) {
    // Invalidate any expression using an addrec in this loop.
    SmallVector<const SCEV *, 8> ToForget;
    auto LoopUsersIt = LoopUsers.find(L);
    if (LoopUsersIt != LoopUsers.end())
      append_range(ToForget, LoopUsersIt->second);
    forgetMemoizedResults(ToForget);

    // Invalidate constant-evolved loop header phis.
    for (PHINode &PN : L->getHeader()->phis())
      ConstantEvolutionLoopExitValue.erase(&PN);
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetAllLoops() {
  // This method is intended to forget all info about loops. It should
  // invalidate caches as if the following happened:
  // - The trip counts of all loops have changed arbitrarily
  // - Every llvm::Value has been updated in place to produce a different
  //   result.
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();
  BECountUsers.clear();
  LoopPropertiesCache.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValueExprMap.clear();
  ValuesAtScopes.clear();
  ValuesAtScopesUsers.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  ExprValueMap.clear();
  HasRecMap.clear();
  ConstantMultipleCache.clear();
  PredicatedSCEVRewrites.clear();
  FoldCache.clear();
  FoldCacheUser.clear();
}

void ScalarEvolution::visitAndClearUsers(
    SmallVectorImpl<Instruction *> &Worklist,
    SmallPtrSetImpl<Instruction *> &Visited,
    SmallVectorImpl<const SCEV *> &ToForget) {
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!isSCEVable(I->getType()) && !isa<WithOverflowInst>(I))
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      ToForget.push_back(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist, Visited);
  }
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<const SCEV *, 16> ToForget;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    // Drop any stored trip count value.
    forgetBackedgeTakenCounts(CurrL, /* Predicated */ false);
    forgetBackedgeTakenCounts(CurrL, /* Predicated */ true);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(),
                      LoopUsersItr->second.end());
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist, Visited);
    visitAndClearUsers(Worklist, Visited, ToForget);

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
  forgetMemoizedResults(ToForget);
}

void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  forgetLoop(L->getOutermostLoop());
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  SmallVector<const SCEV *, 8> ToForget;
  Worklist.push_back(I);
  Visited.insert(I);
  visitAndClearUsers(Worklist, Visited, ToForget);

  forgetMemoizedResults(ToForget);
}

void ScalarEvolution::forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V) {
  if (!isSCEVable(V->getType()))
    return;

  // If SCEV looked through a trivial LCSSA phi node, we might have SCEVs
  // directly using a SCEVUnknown/SCEVAddRec defined in the loop. After an
  // extra predecessor is added, this is no longer valid. Find all Unknowns and
  // AddRecs defined in the loop and invalidate any SCEVs making use of them.
  if (const SCEV *S = getExistingSCEV(V)) {
    struct InvalidationRootCollector {
      Loop *L;
      SmallVector<const SCEV *, 8> Roots;

      InvalidationRootCollector(Loop *L) : L(L) {}

      bool follow(const SCEV *S) {
        if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
          if (auto *I = dyn_cast<Instruction>(SU->getValue()))
            if (L->contains(I))
              Roots.push_back(S);
        } else if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
          if (L->contains(AddRec->getLoop()))
            Roots.push_back(S);
        }
        return true;
      }
      bool isDone() const { return false; }
    };

    InvalidationRootCollector C(L);
    visitAll(S, C);
    forgetMemoizedResults(C.Roots);
  }

  // Also perform the normal invalidation.
  forgetValue(V);
}

void ScalarEvolution::forgetLoopDispositions() { LoopDispositions.clear(); }

void ScalarEvolution::forgetBlockAndLoopDispositions(Value *V) {
  // Unless a specific value is passed to invalidation, completely clear both
  // caches.
  if (!V) {
    BlockDispositions.clear();
    LoopDispositions.clear();
    return;
  }

  if (!isSCEVable(V->getType()))
    return;

  const SCEV *S = getExistingSCEV(V);
  if (!S)
    return;

  // Invalidate the block and loop dispositions cached for S. Dispositions of
  // S's users may change if S's disposition changes (i.e. a user may change to
  // loop-invariant, if S changes to loop invariant), so also invalidate
  // dispositions of S's users recursively.
  SmallVector<const SCEV *, 8> Worklist = {S};
  SmallPtrSet<const SCEV *, 8> Seen = {S};
  while (!Worklist.empty()) {
    const SCEV *Curr = Worklist.pop_back_val();
    bool LoopDispoRemoved = LoopDispositions.erase(Curr);
    bool BlockDispoRemoved = BlockDispositions.erase(Curr);
    if (!LoopDispoRemoved && !BlockDispoRemoved)
      continue;
    auto Users = SCEVUsers.find(Curr);
    if (Users != SCEVUsers.end())
      for (const auto *User : Users->second)
        if (Seen.insert(User).second)
          Worklist.push_back(User);
  }
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop test
/// is never skipped. This is a valid assumption as long as the loop exits via
/// that test. For precise results, it is the caller's responsibility to
/// specify the relevant loop exiting block using getExact(ExitingBlock, SE).
const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
    const Loop *L, ScalarEvolution *SE,
    SmallVectorImpl<const SCEVPredicate *> *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate the loop's latch, so the
  // exact trip count is simply a minimum out of all these calculated exit
  // counts.
  SmallVector<const SCEV *, 2> Ops;
  for (const auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds)
      append_range(*Preds, ENT.Predicates);

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  // If an earlier exit exits on the first iteration (exit count zero), then a
  // later poison exit count should not propagate into the result. These are
  // exactly the semantics provided by umin_seq.
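  // For example, umin_seq(0, poison) is 0, whereas a plain umin would let
  // the poison operand leak into the combined count.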
  return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
}

const ScalarEvolution::ExitNotTakenInfo *
ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
    const BasicBlock *ExitingBlock,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) const {
  for (const auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock) {
      if (ENT.hasAlwaysTruePredicate())
        return &ENT;
      else if (Predicates) {
        append_range(*Predicates, ENT.Predicates);
        return &ENT;
      }
    }

  return nullptr;
}

/// getConstantMax - Get the constant max backedge taken count for the loop.
const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
    ScalarEvolution *SE,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) const {
  if (!getConstantMax())
    return SE->getCouldNotCompute();

  for (const auto &ENT : ExitNotTaken)
    if (!ENT.hasAlwaysTruePredicate()) {
      if (!Predicates)
        return SE->getCouldNotCompute();
      append_range(*Predicates, ENT.Predicates);
    }

  assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
          isa<SCEVConstant>(getConstantMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getConstantMax();
}

const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
    const Loop *L, ScalarEvolution *SE,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  if (!SymbolicMax) {
    // Form an expression for the maximum exit count possible for this loop. We
    // merge the max and exact information to approximate a version of
    // getConstantMaxBackedgeTakenCount which isn't restricted to just
    // constants.
    SmallVector<const SCEV *, 4> ExitCounts;

    for (const auto &ENT : ExitNotTaken) {
      const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
      if (!isa<SCEVCouldNotCompute>(ExitCount)) {
        assert(SE->DT.dominates(ENT.ExitingBlock, L->getLoopLatch()) &&
               "We should only have known counts for exiting blocks that "
               "dominate latch!");
        ExitCounts.push_back(ExitCount);
        if (Predicates)
          append_range(*Predicates, ENT.Predicates);

        assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
               "Predicate should be always true!");
      }
    }
    if (ExitCounts.empty())
      SymbolicMax = SE->getCouldNotCompute();
    else
      SymbolicMax =
          SE->getUMinFromMismatchedTypes(ExitCounts, /*Sequential*/ true);
  }
  return SymbolicMax;
}

bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
    ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExitLimit(E, E, E, false) {}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *ConstantMaxNotTaken,
    const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
    ArrayRef<ArrayRef<const SCEVPredicate *>> PredLists)
    : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken),
      SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) {
  // If we prove the max count is zero, so is the symbolic bound. This happens
  // in practice due to differences in a) how context sensitive we've chosen
  // to be and b) how we reason about bounds implied by UB.
  if (ConstantMaxNotTaken->isZero()) {
    this->ExactNotTaken = E = ConstantMaxNotTaken;
    this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
  }

  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) &&
         "Exact is not allowed to be less precise than Constant Max");
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken)) &&
         "Exact is not allowed to be less precise than Symbolic Max");
  assert((isa<SCEVCouldNotCompute>(SymbolicMaxNotTaken) ||
          !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken)) &&
         "Symbolic Max is not allowed to be less precise than Constant Max");
  assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) ||
          isa<SCEVConstant>(ConstantMaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  SmallPtrSet<const SCEVPredicate *, 4> SeenPreds;
  for (const auto PredList : PredLists)
    for (const auto *P : PredList) {
      if (SeenPreds.contains(P))
        continue;
      assert(!isa<SCEVUnionPredicate>(P) && "Only add leaf predicates here!");
      SeenPreds.insert(P);
      Predicates.push_back(P);
    }
  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
         "Backedge count should be int");
  assert((isa<SCEVCouldNotCompute>(ConstantMaxNotTaken) ||
          !ConstantMaxNotTaken->getType()->isPointerTy()) &&
         "Max backedge count should be int");
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E,
                                      const SCEV *ConstantMaxNotTaken,
                                      const SCEV *SymbolicMaxNotTaken,
                                      bool MaxOrZero,
                                      ArrayRef<const SCEVPredicate *> PredList)
    : ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero,
                ArrayRef({PredList})) {}

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(ExitCounts.begin(), ExitCounts.end(),
                 std::back_inserter(ExitNotTaken),
                 [&](const EdgeExitInfo &EEI) {
                   BasicBlock *ExitBB = EEI.first;
                   const ExitLimit &EL = EEI.second;
                   return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
                                           EL.ConstantMaxNotTaken,
                                           EL.SymbolicMaxNotTaken,
                                           EL.Predicates);
                 });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;
  bool IsOnlyExit = ExitingBlocks.size() == 1;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (BasicBlock *ExitBB : ExitingBlocks) {
    // We canonicalize untaken exits to br (constant); ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if (ExitIfTrue == CI->isZero())
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken != getCouldNotCompute())
      ++NumExitCountsComputed;
    else
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;

    // Remember the exit count if either the exact or the symbolic value is
    // known. Because Exact always implies symbolic, only check symbolic.
    if (EL.SymbolicMaxNotTaken != getCouldNotCompute())
      ExitCounts.emplace_back(ExitBB, EL);
    else {
      assert(EL.ExactNotTaken == getCouldNotCompute() &&
             "Exact is known but symbolic isn't?");
      ++NumExitCountsNotComputed;
    }

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit, otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.ConstantMaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.ConstantMaxNotTaken, where CouldNotCompute is considered greater
    // than any computable EL.ConstantMaxNotTaken.
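    //
    // Intuitively, a must-exit is tested on every iteration, so its bound
    // caps the whole loop (take the min); a may-exit can be skipped on some
    // iterations, so it can only weaken the bound (take the max).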
    if (EL.ConstantMaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.ConstantMaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount,
                                                        EL.ConstantMaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.ConstantMaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.ConstantMaxNotTaken;
      else {
        MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount,
                                                       EL.ConstantMaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount =
      MustExitMaxBECount
          ? MustExitMaxBECount
          : (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);

  // Remember which SCEVs are used in exit limits for invalidation purposes.
  // We only care about non-constant SCEVs here, so we can ignore
  // EL.ConstantMaxNotTaken and MaxBECount, which must be SCEVConstant.
  for (const auto &Pair : ExitCounts) {
    if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
      BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
    if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
      BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
          {L, AllowPredicates});
  }
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool IsOnlyExit, bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection with
  // the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(L, BI->getCondition(), ExitIfTrue,
                                    /*ControlsOnlyExit=*/IsOnlyExit,
                                    AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For a switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(
        L, SI, Exit, /*ControlsOnlyExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit,
    bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsOnlyExit, AllowPredicates);
}

std::optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsOnlyExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
  if (Itr == TripCountMap.end())
    return std::nullopt;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsOnlyExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
  (void)ExitIfTrue;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsOnlyExit, bool AllowPredicates) {

  if (auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
                                AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(
      Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsOnlyExit, bool AllowPredicates) {
  // Handle BinOp conditions (And, Or).
  if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
          Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
    return *LimitFromBinOp;

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
                                    ControlsOnlyExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // unfolded.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    // The backedge is never taken.
    return getZero(CI->getType());
  }

  // If we're exiting based on the overflow flag of an x.with.overflow
  // intrinsic with a constant step, we can form an equivalent icmp predicate
  // and figure out how many iterations will be taken before we exit.
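  // For example, exiting when "uadd.with.overflow(%i, 1)" reports overflow
  // is equivalent to exiting when %i equals the unsigned maximum of its type.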
  const WithOverflowInst *WO;
  const APInt *C;
  if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
      match(WO->getRHS(), m_APInt(C))) {
    ConstantRange NWR =
        ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
                                             WO->getNoWrapKind());
    CmpInst::Predicate Pred;
    APInt NewRHSC, Offset;
    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
    if (!ExitIfTrue)
      Pred = ICmpInst::getInversePredicate(Pred);
    auto *LHS = getSCEV(WO->getLHS());
    if (Offset != 0)
      LHS = getAddExpr(LHS, getConstant(Offset));
    auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
                                       ControlsOnlyExit, AllowPredicates);
    if (EL.hasAnyInfo())
      return EL;
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

std::optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsOnlyExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return std::nullopt;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or  Op0 Op1), exit, loop
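  // In both forms a single operand can already force the exit, so the loop
  // leaves as soon as *either* condition says so, and the combined count is
  // a (possibly sequential) umin of the two per-operand counts below.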
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(
      Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
      AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(
      Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
      AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *ConstantMaxBECount = getCouldNotCompute();
  const SCEV *SymbolicMaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
    // Both conditions must be the same for the loop to continue executing.
    // Choose the less conservative count.
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute()) {
      BECount = getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken,
                                           UseSequentialUMin);
    }
    if (EL0.ConstantMaxNotTaken == getCouldNotCompute())
      ConstantMaxBECount = EL1.ConstantMaxNotTaken;
    else if (EL1.ConstantMaxNotTaken == getCouldNotCompute())
      ConstantMaxBECount = EL0.ConstantMaxNotTaken;
    else
      ConstantMaxBECount = getUMinFromMismatchedTypes(EL0.ConstantMaxNotTaken,
                                                      EL1.ConstantMaxNotTaken);
    if (EL0.SymbolicMaxNotTaken == getCouldNotCompute())
      SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
    else if (EL1.SymbolicMaxNotTaken == getCouldNotCompute())
      SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
    else
      SymbolicMaxBECount = getUMinFromMismatchedTypes(
          EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
  } else {
    // Both conditions must be the same at the same time for the loop to exit.
    // For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // ConstantMaxBECount. In these cases it is possible for EL0.ExactNotTaken
  // and EL1.ExactNotTaken to match, but for EL0.ConstantMaxNotTaken and
  // EL1.ConstantMaxNotTaken to not.
  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));
  if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
    SymbolicMaxBECount =
        isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
                   {ArrayRef(EL0.Predicates), ArrayRef(EL1.Predicates)});
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
    const Loop *L, ICmpInst *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit,
    bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
                                          AllowPredicates);
  if (EL.hasAnyInfo())
    return EL;

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, OriginalPred);
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
    const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    bool ControlsOnlyExit, bool AllowPredicates) {

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant operand, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  bool ControllingFiniteLoop = ControlsOnlyExit && loopHasNoAbnormalExits(L) &&
                               loopIsFiniteByAssumption(L);
  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret))
          return Ret;
      }

  // If this loop must exit based on this condition (or execute undefined
  // behaviour), see if we can improve wrap flags. This is essentially
  // a must-execute style proof.
  if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) {
    // If we can prove the test sequence produced must repeat the same values
    // on self-wrap of the IV, then we can infer that the IV doesn't self-wrap,
    // because if it did, we'd have an infinite (undefined) loop.
    // TODO: We can peel off any functions which are invertible *in L*. Loop
    // invariant terms are effectively constants for our purposes here.
    auto *InnerLHS = LHS;
    if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
      InnerLHS = ZExt->getOperand();
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS);
        AR && !AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() &&
        isKnownToBeAPowerOfTwo(AR->getStepRecurrence(*this), /*OrZero=*/true,
                               /*OrNegative=*/true)) {
      auto Flags = AR->getNoWrapFlags();
      Flags = setFlags(Flags, SCEV::FlagNW);
      SmallVector<const SCEV *> Operands{AR->operands()};
      Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
    }

    // For a slt/ult condition with a positive step, can we prove nsw/nuw?
    // From no-self-wrap, this follows trivially: every (un)signed-wrapped,
    // but not self-wrapped, value must be less than the last value before
    // (un)signed wrap. Since we know that last value didn't exit, neither
    // will any smaller one.
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_ULT) {
      auto WrapType = Pred == ICmpInst::ICMP_SLT ? SCEV::FlagNSW : SCEV::FlagNUW;
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS);
          AR && AR->getLoop() == L && AR->isAffine() &&
          !AR->getNoWrapFlags(WrapType) && AR->hasNoSelfWrap() &&
          isKnownPositive(AR->getStepRecurrence(*this))) {
        auto Flags = AR->getNoWrapFlags();
        Flags = setFlags(Flags, WrapType);
        SmallVector<const SCEV *> Operands{AR->operands()};
        Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
      }
    }
  }

  switch (Pred) {
  case ICmpInst::ICMP_NE: { // while (X != Y)
    // Convert to: while (X-Y != 0)
    if (LHS->getType()->isPointerTy()) {
      LHS = getLosslessPtrToIntExpr(LHS);
      if (isa<SCEVCouldNotCompute>(LHS))
        return LHS;
    }
    if (RHS->getType()->isPointerTy()) {
      RHS = getLosslessPtrToIntExpr(RHS);
      if (isa<SCEVCouldNotCompute>(RHS))
        return RHS;
    }
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
                                AllowPredicates);
    if (EL.hasAnyInfo())
      return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: { // while (X == Y)
    // Convert to: while (X-Y == 0)
    if (LHS->getType()->isPointerTy()) {
      LHS = getLosslessPtrToIntExpr(LHS);
      if (isa<SCEVCouldNotCompute>(LHS))
        return LHS;
    }
    if (RHS->getType()->isPointerTy()) {
      RHS = getLosslessPtrToIntExpr(RHS);
      if (isa<SCEVCouldNotCompute>(RHS))
        return RHS;
    }
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo())
      return EL;
    break;
  }
  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULE:
    // Since the loop is finite, an invariant RHS cannot include the boundary
    // value; otherwise it would loop forever.
    if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
        !isLoopInvariant(RHS, L)) {
      // Otherwise, perform the addition in a wider type, to avoid overflow.
      // If the LHS is an addrec with the appropriate nowrap flag, the
      // extension will be sunk into it and the exit count can be analyzed.
      auto *OldType = dyn_cast<IntegerType>(LHS->getType());
      if (!OldType)
        break;
      // Prefer doubling the bitwidth over adding a single bit to make it more
      // likely that we use a legal type.
      auto *NewType =
          Type::getIntNTy(OldType->getContext(), OldType->getBitWidth() * 2);
      if (ICmpInst::isSigned(Pred)) {
        LHS = getSignExtendExpr(LHS, NewType);
        RHS = getSignExtendExpr(RHS, NewType);
      } else {
        LHS = getZeroExtendExpr(LHS, NewType);
        RHS = getZeroExtendExpr(RHS, NewType);
      }
    }
    RHS = getAddExpr(getOne(RHS->getType()), RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: { // while (X < Y)
    bool IsSigned = ICmpInst::isSigned(Pred);
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo())
      return EL;
    break;
  }
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_UGE:
    // Since the loop is finite, an invariant RHS cannot include the boundary
    // value; otherwise it would loop forever.
    if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
        !isLoopInvariant(RHS, L))
      break;
    RHS = getAddExpr(getMinusOne(RHS->getType()), RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: { // while (X > Y)
    bool IsSigned = ICmpInst::isSigned(Pred);
    ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
                                       AllowPredicates);
    if (EL.hasAnyInfo())
      return EL;
    break;
  }
  default:
    break;
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsOnlyExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
  // a loop whose latch ends in "br i1 loop_condition, loop_header, exit":
  //
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    std::optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.
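  //
  // For example, "while ((x >>= 1) != 0)" must terminate within bitwidth(x)
  // iterations, because the recurrence {x,lshr,1} stabilizes to 0.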

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
9476 Value
*FirstValue
= PN
->getIncomingValueForBlock(Predecessor
);
9477 KnownBits Known
= computeKnownBits(FirstValue
, DL
, 0, &AC
,
9478 Predecessor
->getTerminator(), &DT
);
9479 auto *Ty
= cast
<IntegerType
>(RHS
->getType());
9480 if (Known
.isNonNegative())
9481 StableValue
= ConstantInt::get(Ty
, 0);
9482 else if (Known
.isNegative())
9483 StableValue
= ConstantInt::get(Ty
, -1, true);
9485 return getCouldNotCompute();
9489 case Instruction::LShr
:
9490 case Instruction::Shl
:
9491 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
9492 // stabilize to 0 in at most bitwidth(K) iterations.
9493 StableValue
= ConstantInt::get(cast
<IntegerType
>(RHS
->getType()), 0);
9498 ConstantFoldCompareInstOperands(Pred
, StableValue
, RHS
, DL
, &TLI
);
9499 assert(Result
->getType()->isIntegerTy(1) &&
9500 "Otherwise cannot be an operand to a branch instruction");
9502 if (Result
->isZeroValue()) {
9503 unsigned BitWidth
= getTypeSizeInBits(RHS
->getType());
9504 const SCEV
*UpperBound
=
9505 getConstant(getEffectiveSCEVType(RHS
->getType()), BitWidth
);
9506 return ExitLimit(getCouldNotCompute(), UpperBound
, UpperBound
, false);
9509 return getCouldNotCompute();
/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}
/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}
/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}
/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI.  If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant *> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  return ConstantFoldInstOperands(I, Operands, DL, TLI,
                                  /*AllowNonDeterministic=*/false);
}
// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
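///
/// For example (illustrative): a header PHI with start value 1 and backedge
/// value "%phi * 2", in a loop whose backedge-taken count is known to be 5,
/// folds to the constant 32.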
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto [I, Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
  if (!Inserted)
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return nullptr;  // Not going to evaluate it.

  Constant *&RetVal = I->second;

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;  // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of ExitWhen.
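  //
  // For example, if the condition is "%i == 10" with %i starting at 7 and
  // stepping by 1, and ExitWhen is true, the condition first becomes true on
  // iteration number 3, which is returned as an i32 constant.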
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.  We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      if (!isa<SCEVConstant>(C))
        ValuesAtScopesUsers[C].push_back({L, V});
      break;
    }
  return C;
}
/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
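///
/// For example (illustrative), the SCEV (4 + @g), where @g is a global, is
/// built as the constant expression "getelementptr (i8, ptr @g, i64 4)" even
/// though it is not representable as a ConstantInt.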
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
  case scVScale:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scPtrToInt: {
    const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    Constant *C = nullptr;
    for (const SCEV *Op : SA->operands()) {
      Constant *OpC = BuildConstantFromSCEV(Op);
      if (!OpC)
        return nullptr;
      if (!C) {
        C = OpC;
        continue;
      }
      assert(!C->getType()->isPointerTy() &&
             "Can only have one pointer, and it must be last");
      if (OpC->getType()->isPointerTy()) {
        // The offsets have been converted to bytes.  We can add bytes using
        // an i8 GEP.
        C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
                                           OpC, C);
      } else {
        C = ConstantExpr::getAdd(C, OpC);
      }
    }
    return C;
  }
  case scMulExpr:
  case scSignExtend:
  case scZeroExtend:
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr:
    return nullptr;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
const SCEV *
ScalarEvolution::getWithOperands(const SCEV *S,
                                 SmallVectorImpl<const SCEV *> &NewOps) {
  switch (S->getSCEVType()) {
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
    return getCastExpr(S->getSCEVType(), NewOps[0], S->getType());
  case scAddRecExpr: {
    auto *AddRec = cast<SCEVAddRecExpr>(S);
    return getAddRecExpr(NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags());
  }
  case scAddExpr:
    return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags());
  case scMulExpr:
    return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags());
  case scUDivExpr:
    return getUDivExpr(NewOps[0], NewOps[1]);
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return getMinMaxExpr(S->getSCEVType(), NewOps);
  case scSequentialUMinExpr:
    return getSequentialMinMaxExpr(S->getSCEVType(), NewOps);
  case scConstant:
  case scVScale:
  case scUnknown:
    return S;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  switch (V->getSCEVType()) {
  case scConstant:
  case scVScale:
    return V;
  case scAddRecExpr: {
    // If this is a loop recurrence for a loop that does not contain L, then we
    // are dealing with the final value computed by the loop.
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(V);
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      append_range(NewOps, AddRec->operands().take_front(i));
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec = getAddRecExpr(
          NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
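    // For example, evaluating {0,+,2}<%inner> in the scope of %inner's parent
    // loop yields 2 * BackedgeTakenCount(%inner); if the inner loop is known
    // to take its backedge exactly 10 times, the result is the constant 20.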
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute())
        return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    ArrayRef<const SCEV *> Ops = V->operands();
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L);
      if (OpAtScope != Ops[i]) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps;
        NewOps.reserve(Ops.size());
        append_range(NewOps, Ops.take_front(i));
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Ops[i], L);
          NewOps.push_back(OpAtScope);
        }

        return getWithOperands(V, NewOps);
      }
    }
    // If we got here, all operands are loop invariant.
    return V;
  }
  case scUnknown: {
    // If this instruction is evolved from a constant-evolving PHI, compute the
    // exit value from the loop without using SCEVs.
    const SCEVUnknown *SU = cast<SCEVUnknown>(V);
    Instruction *I = dyn_cast<Instruction>(SU->getValue());
    if (!I)
      return V; // This is some other type of SCEVUnknown, just return it.

    if (PHINode *PN = dyn_cast<PHINode>(I)) {
      const Loop *CurrLoop = this->LI[I->getParent()];
      // Looking for loop exit value.
      if (CurrLoop && CurrLoop->getParentLoop() == L &&
          PN->getParent() == CurrLoop->getHeader()) {
        // Okay, there is no closed form solution for the PHI node.  Check
        // to see if the loop that contains it has a known backedge-taken
        // count.  If so, we may be able to force computation of the exit
        // value.
        const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
        // This trivial case can show up in some degenerate cases where
        // the incoming IR has not yet been fully simplified.
        if (BackedgeTakenCount->isZero()) {
          Value *InitValue = nullptr;
          bool MultipleInitValues = false;
          for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
            if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
              if (!InitValue)
                InitValue = PN->getIncomingValue(i);
              else if (InitValue != PN->getIncomingValue(i)) {
                MultipleInitValues = true;
                break;
              }
            }
          }
          if (!MultipleInitValues && InitValue)
            return getSCEV(InitValue);
        }
        // Do we have a loop invariant value flowing around the backedge
        // for a loop which must execute the backedge?
        if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
            isKnownNonZero(BackedgeTakenCount) &&
            PN->getNumIncomingValues() == 2) {

          unsigned InLoopPred =
              CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
          Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
          if (CurrLoop->isLoopInvariant(BackedgeVal))
            return getSCEV(BackedgeVal);
        }
        if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
          // Okay, we know how many times the containing loop executes.  If
          // this is a constant evolving PHI node, get the final value at
          // the specified iteration number.
          Constant *RV =
              getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
          if (RV)
            return getSCEV(RV);
        }
      }
    }

    // Okay, this is an expression that we cannot symbolically evaluate
    // into a SCEV.  Check to see if it's possible to symbolically evaluate
    // the arguments into constants, and if so, try to constant propagate the
    // result.  This is particularly useful for computing loop exit values.
    if (!CanConstantFold(I))
      return V; // This is some other type of SCEVUnknown, just return it.

    SmallVector<Constant *, 4> Operands;
    Operands.reserve(I->getNumOperands());
    bool MadeImprovement = false;
    for (Value *Op : I->operands()) {
      if (Constant *C = dyn_cast<Constant>(Op)) {
        Operands.push_back(C);
        continue;
      }

      // If any of the operands is non-constant and if they are
      // non-integer and non-pointer, don't even try to analyze them
      // with scev techniques.
      if (!isSCEVable(Op->getType()))
        return V;

      const SCEV *OrigV = getSCEV(Op);
      const SCEV *OpV = getSCEVAtScope(OrigV, L);
      MadeImprovement |= OrigV != OpV;

      Constant *C = BuildConstantFromSCEV(OpV);
      if (!C)
        return V;
      assert(C->getType() == Op->getType() && "Type mismatch");
      Operands.push_back(C);
    }

    // Check to see if getSCEVAtScope actually made an improvement.
    if (!MadeImprovement)
      return V; // This is some other type of SCEVUnknown, just return it.

    Constant *C = nullptr;
    const DataLayout &DL = getDataLayout();
    C = ConstantFoldInstOperands(I, Operands, DL, &TLI,
                                 /*AllowNonDeterministic=*/false);
    if (!C)
      return V;
    return getSCEV(C);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV type!");
}
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}
/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
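///
/// For example, with BW = 8, A = 6 and B = 14: D = gcd(6, 2^8) = 2 divides
/// B, the multiplicative inverse of A/D = 3 modulo 2^7 is 43, and the
/// minimum root is X = (43 * 14 mod 256) / 2 = 45. Indeed, 6 * 45 = 270 =
/// 14 (mod 256).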
static const SCEV *
SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                             SmallVectorImpl<const SCEVPredicate *> *Predicates,
                             ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity
  uint32_t Mult2 = A.countr_zero();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.getMinTrailingZeros(B) < Mult2) {
    // Check if we can prove there's no remainder using URem.
    const SCEV *URem =
        SE.getURemExpr(B, SE.getConstant(APInt::getOneBitSet(BW, Mult2)));
    const SCEV *Zero = SE.getZero(B->getType());
    if (!SE.isKnownPredicate(CmpInst::ICMP_EQ, URem, Zero)) {
      // Try to add a predicate ensuring B is a multiple of 1 << Mult2.
      if (!Predicates)
        return SE.getCouldNotCompute();

      // Avoid adding a predicate that is known to be false.
      if (SE.isKnownPredicate(CmpInst::ICMP_NE, URem, Zero))
        return SE.getCouldNotCompute();
      Predicates->push_back(SE.getEqualPredicate(URem, Zero));
    }
  }

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).trunc(BW - Mult2);  // AD = A / D
  APInt I = AD.multiplicativeInverse().zext(BW);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  // (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns std::nullopt if the addrec coefficients are not
/// compile-time constants.
static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return std::nullopt;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isZero() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.
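  //
  // For example, the addrec {2,+,3,+,4} (L=2, M=3, N=4) accumulates the
  // values 2, 5, 12, 23, ... and yields the equation 4n^2 + 2n + 4 = 0,
  // with the multiplier T = 2.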
  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}
/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return std::nullopt,
/// (c) if exactly one of X and Y exists, return that value.
static std::optional<APInt> MinOptional(std::optional<APInt> X,
                                        std::optional<APInt> Y) {
  if (X && Y) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sext(W);
    APInt YW = Y->sext(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X && !Y)
    return std::nullopt;
  return X ? *X : *Y;
}
/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static std::optional<APInt> TruncIfPossible(std::optional<APInt> X,
                                            unsigned BitWidth) {
  if (!X)
    return std::nullopt;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}
/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns std::nullopt if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist, in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
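///
/// For example (illustrative), for {-6,+,0,+,2} the values are -6, -6, -4,
/// 0, ..., so the least n with c(n) == 0 is 3.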
static std::optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T)
    return std::nullopt;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  std::optional<APInt> X =
      APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth + 1);
  if (!X)
    return std::nullopt;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return std::nullopt;

  return TruncIfPossible(X, BitWidth);
}
/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns std::nullopt if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static std::optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T)
    return std::nullopt;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary =
      [&](APInt Bound) -> std::pair<std::optional<APInt>, bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    std::optional<APInt> SO;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    std::optional<APInt> UO =
        APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth + 1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns std::nullopt, it means that there
    // can be a solution, but the function failed to find it. We cannot treat
    // it as "no solution".
    if (!SO || !UO)
      return {std::nullopt, false};

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    std::optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    std::optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return {std::nullopt, true};
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sext(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return std::nullopt;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns std::nullopt, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}
ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
                                                         const Loop *L,
                                                         bool ControlsOnlyExit,
                                                         bool AllowPredicates) {
  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0.  We know and take advantage of the fact that this
  // expression is only being used in a comparison-with-zero context.
  SmallVector<const SCEVPredicate *> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index.  When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(*S));
      return ExitLimit(R, R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.
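  //
  // For example, {10,+,-2} reaches zero after N = 5 iterations, while for
  // {3,+,2} over i8 the congruence 2*N = -3 (mod 2^8) has no solution,
  // since -3 is odd, and the analysis must give up.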
  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);

  if (!isLoopInvariant(Step, L))
    return getCouldNotCompute();

  LoopGuards Guards = LoopGuards::collect(L, *this);
  // Specialize step for this loop so we get context sensitive facts below.
  const SCEV *StepWLG = applyLoopGuards(Step, Guards);

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step (as unsigned)
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = isKnownNegative(StepWLG);
  if (!CountDown && !isKnownNonNegative(StepWLG))
    return getCouldNotCompute();

  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC &&
      (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne())) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, Guards));
    MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance));

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
    // we end up with a loop whose backedge-taken count is n - 1.  Detect this
    // case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }

    return ExitLimit(Distance, getConstant(MaxBECount), Distance, false,
                     Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsOnlyExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {

    // If the stride is zero, the loop must be infinite.  In C++, most loops
    // are finite by assumption, in which case the step being zero implies
    // UB must execute if the loop is entered.
    if (!loopIsFiniteByAssumption(L) && !isKnownNonZero(StepWLG))
      return getCouldNotCompute();

    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *ConstantMax = getCouldNotCompute();
    if (Exact != getCouldNotCompute()) {
      APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, Guards));
      ConstantMax =
          getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact)));
    }
    const SCEV *SymbolicMax =
        isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact;
    return ExitLimit(Exact, ConstantMax, SymbolicMax, false, Predicates);
  }

  // Solve the general equation.
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();
  const SCEV *E = SolveLinEquationWithOverflow(
      StepC->getAPInt(), getNegativeSCEV(Start),
      AllowPredicates ? &Predicates : nullptr, *this);

  const SCEV *M = E;
  if (E != getCouldNotCompute()) {
    APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, Guards));
    M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
  }
  auto *S = isa<SCEVCouldNotCompute>(E) ? M : E;
  return ExitLimit(E, M, S, false, Predicates);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}
std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, BB};
}
/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add || Add->getNumOperands() != 2)
    return false;
  if (auto *ME = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
      ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
    LHS = Add->getOperand(1);
    RHS = ME->getOperand(1);
    return true;
  }
  if (auto *ME = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
      ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
    LHS = Add->getOperand(0);
    RHS = ME->getOperand(1);
    return true;
  }
  return false;
}
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (!ICmpInst::compare(LHSC->getAPInt(), RHSC->getAPInt(), Pred))
        return TrivialCase(false);
      return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (RA.isZero() && MatchBinarySub(LHS, LHS, RHS))
          Changed = true;
        break;

        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
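  // For example, "x <=s 20" becomes "x <s 21" when the constant 21 cannot
  // overflow; otherwise "x - 1 <s 20" is tried instead.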
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1);

  return Changed;
}
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}
bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  // Query push down for cases where the unsigned range is
  // less than sufficient.
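  //
  // For example, an i8 value with unsigned range [1, 253) is clearly
  // non-zero, but the unsigned range of its sign extension to i32 wraps
  // through zero, so recursing into the operand succeeds where the plain
  // range check would not.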
  if (const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return isKnownNonZero(SExt->getOperand(0));
  return getUnsignedRangeMin(S) != 0;
}
bool ScalarEvolution::isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero,
                                             bool OrNegative) {
  auto NonRecursive = [this, OrNegative](const SCEV *S) {
    if (auto *C = dyn_cast<SCEVConstant>(S))
      return C->getAPInt().isPowerOf2() ||
             (OrNegative && C->getAPInt().isNegatedPowerOf2());

    // The vscale_range indicates vscale is a power-of-two.
    return isa<SCEVVScale>(S) && F.hasFnAttribute(Attribute::VScaleRange);
  };

  if (NonRecursive(S))
    return true;

  auto *Mul = dyn_cast<SCEVMulExpr>(S);
  if (!Mul)
    return false;
  return all_of(Mul->operands(), NonRecursive) && (OrZero || isKnownNonZero(S));
}
std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}
bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (const auto *L1 : LoopsUsed)
    for (const auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *llvm::max_element(LoopsUsed, [&](const Loop *L1, const Loop *L2) {
        return DT.properlyDominates(L1->getHeader(), L2->getHeader());
      });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // if LHS contains unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // if RHS contains unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // It seems backedge guard check is faster than entry one so in some cases
  // it can speed up whole estimation by short circuit
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}
std::optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
                                                       const SCEV *LHS,
                                                       const SCEV *RHS) {
  if (isKnownPredicate(Pred, LHS, RHS))
    return true;
  if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
    return false;
  return std::nullopt;
}
bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *CtxI) {
  // TODO: Analyze guards and assumes from Context's block.
  return isKnownPredicate(Pred, LHS, RHS) ||
         isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
}
std::optional<bool>
ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
                                     const SCEV *RHS, const Instruction *CtxI) {
  std::optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
  if (KnownWithoutContext)
    return KnownWithoutContext;

  if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
    return true;
  if (isBasicBlockEntryGuardedByCond(CtxI->getParent(),
                                     ICmpInst::getInversePredicate(Pred),
                                     LHS, RHS))
    return false;
  return std::nullopt;
}
bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}
std::optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred) {
  auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  if (Result) {
    auto ResultSwapped =
        getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));

    assert(*ResultSwapped != *Result &&
           "monotonicity should flip as we flip the predicate");
  }
#endif

  return Result;
}
std::optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred) {
  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.
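
  // For example, an addrec {0,+,1}<nuw> with Pred = u> is
  // MonotonicallyIncreasing: the IV only grows in the unsigned sense, so
  // "IV u> RHS" can change from false to true at most once and never back.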

  // Only handle LE/LT/GE/GT predicates.
  if (!ICmpInst::isRelational(Pred))
    return std::nullopt;

  bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
  assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
         "Should be greater or less!");

  // Check that AR does not wrap.
  if (ICmpInst::isUnsigned(Pred)) {
    if (!LHS->hasNoUnsignedWrap())
      return std::nullopt;
    return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
  }
  assert(ICmpInst::isSigned(Pred) &&
         "Relational predicate is either signed or unsigned!");
  if (!LHS->hasNoSignedWrap())
    return std::nullopt;

  const SCEV *Step = LHS->getStepRecurrence(*this);

  if (isKnownNonNegative(Step))
    return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

  if (isKnownNonPositive(Step))
    return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

  return std::nullopt;
}
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS,
                                           const Loop *L,
                                           const Instruction *CtxI) {
  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return std::nullopt;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return std::nullopt;

  auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
  if (!MonotonicType)
    return std::nullopt;
  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.
  bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(),
                                                   RHS);

  if (!CtxI)
    return std::nullopt;
  // Try to prove via context.
  // TODO: Support other cases.
  switch (Pred) {
  default:
    break;
  case ICmpInst::ICMP_ULE:
  case ICmpInst::ICMP_ULT: {
    assert(ArLHS->hasNoUnsignedWrap() && "Is a requirement of monotonicity!");
    // Given preconditions
    // (1) ArLHS does not cross the border of positive and negative parts of
    //     range because of:
    //       - Positive step; (TODO: lift this limitation)
    //       - nuw - does not cross zero boundary;
    //       - nsw - does not cross SINT_MAX boundary;
    // (2) ArLHS <s RHS
    // (3) RHS >=s 0
    //
    // we can replace the loop variant ArLHS <u RHS condition with loop
    // invariant Start(ArLHS) <u RHS.
    //
    // Because of (1) there are two options:
    // - ArLHS is always negative. It means that ArLHS <u RHS is always false;
    // - ArLHS is always non-negative. Because of (3) RHS is also non-negative.
    //   It means that ArLHS <s RHS <=> ArLHS <u RHS.
    //   Because of (2) ArLHS <u RHS is trivially true.
    // All together it means that ArLHS <u RHS <=> Start(ArLHS) >=s 0.
    // We can strengthen this to Start(ArLHS) <u RHS.
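    // Concretely, for ArLHS = {Start,+,1}<nuw><nsw> with a context where
    // "ArLHS <s RHS" holds and RHS >=s 0 is known, the loop-variant test
    // "ArLHS <u RHS" can be replaced by the invariant test "Start <u RHS".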
    auto SignFlippedPred = ICmpInst::getFlippedSignednessPredicate(Pred);
    if (ArLHS->hasNoSignedWrap() && ArLHS->isAffine() &&
        isKnownPositive(ArLHS->getStepRecurrence(*this)) &&
        isKnownNonNegative(RHS) &&
        isKnownPredicateAt(SignFlippedPred, ArLHS, RHS, CtxI))
      return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(),
                                                     RHS);
    break;
  }
  }

  return std::nullopt;
}
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *CtxI, const SCEV *MaxIter) {
  if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
          Pred, LHS, RHS, L, CtxI, MaxIter))
    return LIP;
  if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
    // Number of iterations expressed as UMIN isn't always great for expressing
    // the value on the last iteration. If the straightforward approach didn't
    // work, try the following trick: if a predicate is invariant for X, it
    // is also invariant for umin(X, ...). So try to find something that works
    // among subexpressions of MaxIter expressed as umin.
    for (auto *Op : UMin->operands())
      if (auto LIP = getLoopInvariantExitCondDuringFirstIterationsImpl(
              Pred, LHS, RHS, L, CtxI, Op))
        return LIP;
  return std::nullopt;
}
std::optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterationsImpl(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *CtxI, const SCEV *MaxIter) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic in the iteration space.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.
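
  // For example, for AR = {0,+,1}, Pred = u< and MaxIter = N, the value of
  // the IV on the suggested last iteration is N; if the backedge also
  // guarantees "N u< RHS" and "0 u<= N" holds at the context, the exit
  // condition is invariant during the first N iterations and can be checked
  // as "0 u< RHS".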

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return std::nullopt;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AR || AR->getLoop() != L)
    return std::nullopt;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return std::nullopt;

  // TODO: Support steps other than +/- 1.
  const SCEV *Step = AR->getStepRecurrence(*this);
  auto *One = getOne(Step->getType());
  auto *MinusOne = getNegativeSCEV(One);
  if (Step != One && Step != MinusOne)
    return std::nullopt;

  // Type mismatch here means that MaxIter is potentially larger than max
  // unsigned value in start type, which means we cannot prove no wrap for
  // the indvar.
  if (AR->getType() != MaxIter->getType())
    return std::nullopt;

  // Value of IV on suggested last iteration.
  const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
  // Does it still meet the requirement?
  if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
    return std::nullopt;
  // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
  // not exceed max unsigned value of this type), this effectively proves
  // that there is no wrap during the iteration. To prove that there is no
  // signed/unsigned wrap, we need to check that
  // Start <= Last for step = 1 or Start >= Last for step = -1.
  ICmpInst::Predicate NoOverflowPred =
      CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  if (Step == MinusOne)
    NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
  const SCEV *Start = AR->getStart();
  if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
    return std::nullopt;

  // Everything is fine.
  return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
}
bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges = [&](const ConstantRange &RangeLHS,
                         const ConstantRange &RangeRHS) {
    return RangeLHS.icmp(Pred, RangeRHS);
  };

  // The check at the top of the function catches the case where the values
  // are known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE) {
    auto SL = getSignedRange(LHS);
    auto SR = getSignedRange(RHS);
    if (CheckRanges(SL, SR))
      return true;
    auto UL = getUnsignedRange(LHS);
    auto UR = getUnsignedRange(RHS);
    if (CheckRanges(UL, UR))
      return true;
    auto *Diff = getMinusSCEV(LHS, RHS);
    return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
  }

  if (CmpInst::isSigned(Pred)) {
    auto SL = getSignedRange(LHS);
    auto SR = getSignedRange(RHS);
    return CheckRanges(SL, SR);
  }

  auto UL = getUnsignedRange(LHS);
  auto UR = getUnsignedRange(RHS);
  return CheckRanges(UL, UR);
}
bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
  // C1 and C2 are constant integers. If either X or Y are not add expressions,
  // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
  // OutC1 and OutC2.
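  // For example, (A + 2)<nuw> u<= (A + 5)<nuw> is recognized below with
  // C1 = 2 and C2 = 5: both sides share the operand A, the nuw flags rule
  // out wrapping, and 2 u<= 5 implies the predicate holds.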
  auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
                                      APInt &OutC1, APInt &OutC2,
                                      SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *XNonConstOp, *XConstOp;
    const SCEV *YNonConstOp, *YConstOp;
    SCEV::NoWrapFlags XFlagsPresent;
    SCEV::NoWrapFlags YFlagsPresent;

    if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
      XConstOp = getZero(X->getType());
      XNonConstOp = X;
      XFlagsPresent = ExpectedFlags;
    }

    if (!isa<SCEVConstant>(XConstOp) ||
        (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
      return false;

    if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
      YConstOp = getZero(Y->getType());
      YNonConstOp = Y;
      YFlagsPresent = ExpectedFlags;
    }

    if (!isa<SCEVConstant>(YConstOp) ||
        (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
      return false;

    if (YNonConstOp != XNonConstOp)
      return false;

    OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
    OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();

    return true;
  };

  APInt C1;
  APInt C2;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLE:
    // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
      return true;

    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLT:
    // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
      return true;

    break;

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULE:
    // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNUW) && C1.ule(C2))
      return true;

    break;

  case ICmpInst::ICMP_UGT:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULT:
    // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNUW) && C1.ult(C2))
      return true;
    break;
  }

  return false;
}
bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing arbitrary number of activations of isKnownPredicateViaSplitting
  // on the stack can result in exponential time complexity.
  SaveAndRestore Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}
bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding). Do not bother about
  // unreachable loops.
  if (!L || !DT.isReachableFromEntry(L->getHeader()))
    return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge.  This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.  The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}
bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
                                                     ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS) {
  // Do not bother proving facts for unreachable code.
  if (!DT.isReachableFromEntry(BB))
    return true;
  if (VerifyIR)
    assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  auto SplitAndProve =
      [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
    if (!ProvedNonStrictComparison)
      ProvedNonStrictComparison = Fn(NonStrictPredicate);
    if (!ProvedNonEquality)
      ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
    return false;
  };

  if (ProvingStrictComparison) {
    auto ProofFn = [&](ICmpInst::Predicate P) {
      return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
    };
    if (SplitAndProve(ProofFn))
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
    const Instruction *CtxI = &BB->front();
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
      return true;
    if (ProvingStrictComparison) {
      auto ProofFn = [&](ICmpInst::Predicate P) {
        return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
      };
      if (SplitAndProve(ProofFn))
        return true;
    }
    return false;
  };

  // Starting at the block's predecessor, climb up the predecessor chain, as
  // long as there are predecessors that can be found that have unique
  // successors leading to the original block.
  const Loop *ContainingLoop = LI.getLoopFor(BB);
  const BasicBlock *PredBB;
  if (ContainingLoop && ContainingLoop->getHeader() == BB)
    PredBB = ContainingLoop->getLoopPredecessor();
  else
    PredBB = BB->getSinglePredecessor();
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    const BranchInst *BlockEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(BlockEntryPredicate->getCondition(),
                     BlockEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, BB))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  // Check conditions due to any @llvm.experimental.guard intrinsics.
  auto *GuardDecl = Intrinsic::getDeclarationIfExists(
      F.getParent(), Intrinsic::experimental_guard);
  if (GuardDecl)
    for (const auto *GU : GuardDecl->users())
      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
        if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
          if (ProveViaCond(Guard->getArgOperand(0), false))
            return true;
  return false;
}
bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                               ICmpInst::Predicate Pred,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L)
    return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    const Value *FoundCondValue, bool Inverse,
                                    const Instruction *CtxI) {
  // A false condition implies anything. Do not bother analyzing it further.
  if (FoundCondValue ==
      ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
    return true;

  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  const Value *Op0, *Op1;
  if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
    if (!Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
  } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
    if (Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *CtxI) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into narrow unsigned range. If so, try to prove facts in
    // narrow types.
    if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() &&
        !FoundRHS->getType()->isPointerTy()) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS,
                                          MaxValue) &&
          isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS,
                                          MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, CtxI))
          return true;
      }
    }

    if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy())
      return false;
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy())
      return false;
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, CtxI);
}
bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *CtxI) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType()) &&
         "Types should be balanced!");
  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    // We can write the implication
    // 0.  LHS Pred      RHS  <-   FoundLHS SwapPred  FoundRHS
    // using one of the following ways:
    // 1.  LHS Pred      RHS  <-   FoundRHS Pred      FoundLHS
    // 2.  RHS SwapPred  LHS  <-   FoundLHS SwapPred  FoundRHS
    // 3.  LHS Pred      RHS  <-  ~FoundLHS Pred     ~FoundRHS
    // 4. ~LHS SwapPred ~RHS  <-   FoundLHS SwapPred  FoundRHS
    // Forms 1. and 2. require swapping the operands of one condition. Don't
    // do this if it would break canonical constant/addrec ordering.
    if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
      return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
                                   CtxI);
    if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);

    // There's no clear preference between forms 3. and 4., try both.  Avoid
    // forming getNotSCEV of pointer values as the resulting subtract is
    // not legal.
    if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() &&
        isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
                              FoundLHS, FoundRHS, CtxI))
      return true;

    if (!FoundLHS->getType()->isPointerTy() &&
        !FoundRHS->getType()->isPointerTy() &&
        isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
                              getNotSCEV(FoundRHS), CtxI))
      return true;

    return false;
  }

  auto IsSignFlippedPredicate = [](CmpInst::Predicate P1,
                                   CmpInst::Predicate P2) {
    assert(P1 != P2 && "Handled earlier!");
    return CmpInst::isRelational(P2) &&
           P1 == ICmpInst::getFlippedSignednessPredicate(P2);
  };
  if (IsSignFlippedPredicate(Pred, FoundPred)) {
    // Unsigned comparison is the same as signed comparison when both the
    // operands are non-negative or negative.
    if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) ||
        (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS)))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
    // Create local copies that we can freely swap and canonicalize our
    // conditions to "le/lt".
    ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
    const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS,
               *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
    if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) {
      CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred);
      CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred);
      std::swap(CanonicalLHS, CanonicalRHS);
      std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
    }
    assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) &&
           "Must be!");
    assert((ICmpInst::isLT(CanonicalFoundPred) ||
            ICmpInst::isLE(CanonicalFoundPred)) &&
           "Must be!");
    if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS))
      // Use implication:
      // x <u y && y >=s 0 --> x <s y.
      // If we can prove the left part, the right part is also proven.
      return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
                                   CanonicalRHS, CanonicalFoundLHS,
                                   CanonicalFoundRHS);
    if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS))
      // Use implication:
      // x <s y && y <s 0 --> x <u y.
      // If we can prove the left part, the right part is also proven.
      return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
                                   CanonicalRHS, CanonicalFoundLHS,
                                   CanonicalFoundRHS);
  }

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
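
      // For example, if the unsigned range of V is [5, 100) and the guard
      // gives V != 5, then V u>= 6; a fact that does not follow from
      // V u>= 5 alone may follow from the sharper V u>= 6.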

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin.  If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  CtxI))
          return true;
        [[fallthrough]];

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min).  We know from the guarding condition that !(V
        // == Min).  This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
          return true;
        break;

      // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
      // `RHS > LHS` and `RHS >= LHS` respectively.
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_ULE:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(SharperMin), CtxI))
          return true;
        [[fallthrough]];

      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_ULT:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(Min), CtxI))
          return true;
        break;

      default:
        // No change.
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
        return true;

  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS,
                                     FoundRHS))
    return true;

  // Otherwise assume the worst.
  return false;
}
bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}
std::optional<APInt>
ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).
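  // For example, for More = {12,+,4}<L> and Less = {4,+,4}<L>, the loop below
  // first reduces both addrecs (equal steps) to their starts, 12 and 4, and
  // then folds the two constants into the accumulated difference of 8.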

  unsigned BW = getTypeSizeInBits(More->getType());
  APInt Diff(BW, 0);
  APInt DiffMul(BW, 1);
  // Try various simplifications to reduce the difference to a constant. Limit
  // the number of allowed simplifications to keep compile-time low.
  for (unsigned I = 0; I < 8; ++I) {
    if (More == Less)
      return Diff;

    // Reduce addrecs with identical steps to their start value.
    if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
      const auto *LAR = cast<SCEVAddRecExpr>(Less);
      const auto *MAR = cast<SCEVAddRecExpr>(More);

      if (LAR->getLoop() != MAR->getLoop())
        return std::nullopt;

      // We look at affine expressions only; not for correctness but to keep
      // getStepRecurrence cheap.
      if (!LAR->isAffine() || !MAR->isAffine())
        return std::nullopt;

      if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
        return std::nullopt;

      Less = LAR->getStart();
      More = MAR->getStart();
      continue;
    }

    // Try to match a common constant multiply.
    auto MatchConstMul =
        [](const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
      auto *M = dyn_cast<SCEVMulExpr>(S);
      if (!M || M->getNumOperands() != 2 ||
          !isa<SCEVConstant>(M->getOperand(0)))
        return std::nullopt;
      return {
          {M->getOperand(1), cast<SCEVConstant>(M->getOperand(0))->getAPInt()}};
    };
    if (auto MatchedMore = MatchConstMul(More)) {
      if (auto MatchedLess = MatchConstMul(Less)) {
        if (MatchedMore->second == MatchedLess->second) {
          More = MatchedMore->first;
          Less = MatchedLess->first;
          DiffMul *= MatchedMore->second;
          continue;
        }
      }
    }

    // Try to cancel out common factors in two add expressions.
    SmallDenseMap<const SCEV *, int, 8> Multiplicity;
    auto Add = [&](const SCEV *S, int Mul) {
      if (auto *C = dyn_cast<SCEVConstant>(S)) {
        if (Mul == 1) {
          Diff += C->getAPInt() * DiffMul;
        } else {
          assert(Mul == -1);
          Diff -= C->getAPInt() * DiffMul;
        }
      } else
        Multiplicity[S] += Mul;
    };
    auto Decompose = [&](const SCEV *S, int Mul) {
      if (isa<SCEVAddExpr>(S)) {
        for (const SCEV *Op : S->operands())
          Add(Op, Mul);
      } else
        Add(S, Mul);
    };
    Decompose(More, 1);
    Decompose(Less, -1);

    // Check whether all the non-constants cancel out, or reduce to new
    // More/Less values.
    const SCEV *NewMore = nullptr, *NewLess = nullptr;
    for (const auto &[S, Mul] : Multiplicity) {
      if (Mul == 0)
        continue;
      if (Mul == 1) {
        if (NewMore)
          return std::nullopt;
        NewMore = S;
      } else if (Mul == -1) {
        if (NewLess)
          return std::nullopt;
        NewLess = S;
      } else
        return std::nullopt;
    }

    // Values stayed the same, no point in trying further.
    if (NewMore == More || NewLess == Less)
      return std::nullopt;

    More = NewMore;
    Less = NewLess;

    // Reduced to constant.
    if (!More && !Less)
      return Diff;

    // Left with variable on only one side, bail out.
    if (!More || !Less)
      return std::nullopt;
  }

  // Did not reduce to constant.
  return std::nullopt;
}
bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) {
  // Try to recognize the following pattern:
  //
  //   FoundRHS = ...
  // ...
  // loop:
  //   FoundLHS = {Start,+,W}
  // context_bb: // Basic block from the same loop
  //   known(Pred, FoundLHS, FoundRHS)
  //
  // If some predicate is known in the context of a loop, it is also known on
  // each iteration of this loop, including the first iteration. Therefore, in
  // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
  // prove the original pred using this fact.
  if (!CtxI)
    return false;
  const BasicBlock *ContextBB = CtxI->getParent();
  // Make sure AR varies in the context block.
  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
  }

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
  }

  return false;
}
bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                        (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
  // <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".
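
  // As a concrete instance of (1): from FoundLHS u< FoundRHS u< -4 we may
  // add C = 4 to both sides, since neither sum can wrap, and conclude
  // (FoundLHS + 4) u< (FoundRHS + 4).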

  std::optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  if (!LDiff)
    return false;
  std::optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}
bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return conservative
      // answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it the left-hand
  // side.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, Pred, FoundLHS,
                                          FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both an AddRec and
    // Unknown PHIs; for such a loop we can compare the incoming values of
    // the AddRec from above the loop and from the latch with the respective
    // incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to
    // RHS, the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      // Make sure L does not refer to a value from a potentially previous
      // iteration of a loop.
      if (!properlyDominates(L, LBB))
        return false;
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}
bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS,
                                                    const SCEV *FoundLHS,
                                                    const SCEV *FoundRHS) {
  // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue).  First, make
  // sure that we are dealing with same LHS.
  if (RHS == FoundRHS) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }
  if (LHS != FoundLHS)
    return false;

  auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
  if (!SUFoundRHS)
    return false;

  Value *Shiftee, *ShiftValue;

  using namespace PatternMatch;
  if (match(SUFoundRHS->getValue(),
            m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
    auto *ShifteeS = getSCEV(Shiftee);
    // Prove one of the following:
    // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
    // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
    // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
    //   ---> LHS <s RHS
    // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
    //   ---> LHS <=s RHS
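    // These implications hold because a logical right shift can only
    // decrease a value in the unsigned sense: (shiftee >> shiftvalue)
    // u<= shiftee, so shiftee u<= RHS chains into the desired fact.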
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS);
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      if (isKnownNonNegative(ShifteeS))
        return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS);
  }

  return false;
}
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *CtxI) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          CtxI))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS);
}
/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.
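  // For example, {A,+,2}<nsw> s< {B,+,2}<nsw> over the same loop reduces to
  // the entry check "A s< B": equal steps keep the gap constant, and nsw
  // rules out any wrap that could reorder the two sequences.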
12463 if (!ICmpInst::isRelational(Pred
))
12466 const SCEVAddRecExpr
*LAR
= dyn_cast
<SCEVAddRecExpr
>(LHS
);
12469 const SCEVAddRecExpr
*RAR
= dyn_cast
<SCEVAddRecExpr
>(RHS
);
12472 if (LAR
->getLoop() != RAR
->getLoop())
12474 if (!LAR
->isAffine() || !RAR
->isAffine())
12477 if (LAR
->getStepRecurrence(SE
) != RAR
->getStepRecurrence(SE
))
12480 SCEV::NoWrapFlags NW
= ICmpInst::isSigned(Pred
) ?
12481 SCEV::FlagNSW
: SCEV::FlagNUW
;
12482 if (!LAR
->getNoWrapFlags(NW
) || !RAR
->getNoWrapFlags(NW
))
12485 return SE
.isKnownPredicate(Pred
, LAR
->getStart(), RAR
->getStart());
/// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        // FIXME: what about umin_seq?
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}
12522 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred
,
12523 const SCEV
*LHS
, const SCEV
*RHS
,
12524 const SCEV
*FoundLHS
,
12525 const SCEV
*FoundRHS
,
12527 assert(getTypeSizeInBits(LHS
->getType()) ==
12528 getTypeSizeInBits(RHS
->getType()) &&
12529 "LHS and RHS have different sizes?");
12530 assert(getTypeSizeInBits(FoundLHS
->getType()) ==
12531 getTypeSizeInBits(FoundRHS
->getType()) &&
12532 "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparison so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace an unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which
      // would then be cached as SCEVCouldNotCompute to avoid the infinite
      // recursion. To avoid this, we only want to create SCEVs that are
      // constants in this section. So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the
      // denominator is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and
  // now need to prove something for them, try to prove the predicate for
  // every possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}
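
// Worked instance of the division rules above (values chosen only for
// illustration): with Denominator == 4 and RHS <= 0, knowing
// FoundRHS == 3 (which is >s Denominator - 2 == 2) and FoundLHS >s FoundRHS
// gives FoundLHS >= 4, so LHS == FoundLHS /s 4 >= 1 >s 0 >= RHS.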

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}
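
// For example, with an i8 operand x == -1 widened to i16: sext gives -1
// (0xFFFF) and zext gives 255 (0x00FF), so "sext x s<= zext x" (-1 s<= 255)
// and "zext x u<= sext x" (0x00FF u<= 0xFFFF) both hold; for non-negative x
// the two extensions are equal.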

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}
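
// For example, for Pred == ICMP_SLT with the known fact FoundLHS <s FoundRHS:
// proving LHS <=s FoundLHS and RHS >=s FoundRHS yields LHS <s RHS by
// transitivity (e.g. LHS <= 3 <s 5 <= RHS).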

bool ScalarEvolution::isImpliedCondOperandsViaRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
    const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` could be lifted easily -- it exists only
    // to reduce the compile time impact of this optimization.
    return false;

  std::optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `FoundPred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeExactICmpRegion(FoundPred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return LHSRange.icmp(Pred, ConstRHS);
}
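
// Worked example (illustrative values): from "FoundLHS <u 8" we get
// FoundLHSRange == [0, 8). If LHS - FoundLHS == 2, then LHSRange == [2, 10),
// and the consequent "LHS <u 10" holds for every value in that range, so the
// implication is proved.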

bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}
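
// For example, in 8 bits unsigned with MaxRHS == 250 and a stride known to be
// at most 10 (so UMaxStrideMinusOne == 9): 255 - 9 == 246 <u 250, so an IV at
// 249 would still enter the loop and 249 + 10 would wrap.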

bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {
  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
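
// Mirror image of the less-than case above: in 8 bits unsigned with
// MinRHS == 5 and UMaxStrideMinusOne == 9, 0 + 9 == 9 >u 5, so an IV at 6
// would still enter the loop and 6 - 10 would wrap below zero.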

const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
  // umin(N, 1) + floor((N - umin(N, 1)) / D)
  // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
  // expression fixes the case of N=0.
  const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
  const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
  return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
}
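
// For example, N == 7, D == 3: umin(7, 1) == 1 and 1 + (7 - 1) /u 3 == 3,
// which is ceil(7 / 3). For N == 0 the result is 0 + 0 /u D == 0, where a
// plain "1 + (N - 1) /u D" would have wrapped.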

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {
  // The logic in this function assumes we can represent a positive stride.
  // If we can't, the backedge-taken count must be zero.
  if (IsSigned && BitWidth == 1)
    return getZero(Stride->getType());

  // The code below has only been closely audited for negative strides in the
  // unsigned comparison case. It may be correct for signed comparison, but
  // that needs to be established.
  if (IsSigned && isKnownNegative(Stride))
    return getCouldNotCompute();

  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt MinStride =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We assume either the stride is positive, or the backedge-taken count
  // is zero. So force StrideForMaxBECount to be at least one.
  APInt One(BitWidth, 1);
  APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
                                       : APIntOps::umax(One, MinStride);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression, we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe
  // because in the other case (End - Start) is zero, leading to a zero
  // maximum backedge-taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
  MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
                    : APIntOps::umax(MaxEnd, MinStart);

  return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
                         getConstant(StrideForMaxBECount) /* Step */);
}
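
// Worked example (illustrative ranges): unsigned, 8 bits, Start in [0, 10],
// Stride >= 2, End at most 100. Then MinStart == 0, StrideForMaxBECount == 2,
// Limit == 255 - 1 == 254, MaxEnd == umin(100, 254) == 100, and the result is
// ceil((100 - 0) /u 2) == 50, so the backedge is taken at most 50 times.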

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsOnlyExit, bool AllowPredicates) {
  SmallVector<const SCEVPredicate *> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      auto canProveNUW = [&]() {
        // We can use the comparison to infer no-wrap flags only if it fully
        // controls the loop exit.
        if (!ControlsOnlyExit)
          return false;

        if (!isLoopInvariant(RHS, L))
          return false;

        if (!isKnownNonZero(AR->getStepRecurrence(*this)))
          // We need the sequence defined by AR to strictly increase in the
          // unsigned integer domain for the logic below to hold.
          return false;

        const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType());
        const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType());
        // If RHS <=u Limit, then there must exist a value V in the sequence
        // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and
        // V <=u UINT_MAX. Thus, we must exit the loop before unsigned
        // overflow occurs. This limit also implies that a signed comparison
        // (in the wide bitwidth) is equivalent to an unsigned comparison as
        // the high bits on both sides must be zero.
        APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this));
        APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1);
        Limit = Limit.zext(OuterBitWidth);
        return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit);
      };
      auto Flags = AR->getNoWrapFlags();
      if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW())
        Flags = setFlags(Flags, SCEV::FlagNUW);

      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
      if (AR->hasNoUnsignedWrap()) {
        // Emulate what getZeroExtendExpr would have done during construction
        // if we'd been able to infer the fact just above at that time.
        const SCEV *Step = AR->getStepRecurrence(*this);
        Type *Ty = ZExt->getType();
        auto *S = getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
            getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
        IV = dyn_cast<SCEVAddRecExpr>(S);
      }
    }
  }

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  // A precondition of this method is that the condition being analyzed
  // reaches an exiting branch which dominates the latch. Given that, we can
  // assume that an increment which violates the nowrap specification and
  // produces poison must cause undefined behavior when the resulting poison
  // value is branched upon and thus we can conclude that the backedge is
  // taken no more often than would be required to produce that poison value.
  // Note that a well defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single trip loop. The backedge taken count formula reduces to zero in
    // this case.
    //
    // Preconditions b) and c) combine to imply that if rhs is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
        !loopHasNoAbnormalExits(L))
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't
      // know if it might eventually be greater than start and if so, on which
      // iteration. We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below. Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero. Given
      // that, we know the numerator in the divides below must be zero, so we
      // can pick an arbitrary non-zero value for the denominator (e.g.
      // stride) and produce the right result.
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction. Suppose the stride were zero. If we can
        // prove that the backedge *is* taken on the first iteration, then
        // since we know this condition controls the sole exit, we must have
        // an infinite loop. We can't have a (well defined) infinite loop per
        // the check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride). Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!NoWrap) {
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned))
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  // IV can be assumed not to overflow up to and including the exiting
  // iteration. We proved this in one of two ways:
  // 1) We can show overflow doesn't occur before the exiting iteration
  //    1a) canIVOverflowOnLT, and 1b) step of one
  // 2) We can show that if overflow occurs, the loop must execute UB
  //    before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    RHS = getLosslessPtrToIntExpr(RHS);
    if (isa<SCEVCouldNotCompute>(RHS))
      return RHS;
  }

  const SCEV *End = nullptr, *BECount = nullptr,
             *BECountIfBackedgeTaken = nullptr;
  if (!isLoopInvariant(RHS, L)) {
    const auto *RHSAddRec = dyn_cast<SCEVAddRecExpr>(RHS);
    if (PositiveStride && RHSAddRec != nullptr && RHSAddRec->getLoop() == L &&
        RHSAddRec->getNoWrapFlags()) {
      // The structure of the loop whose backedge count we are trying to
      // calculate:
      //
      // left = left_start
      // right = right_start
      //
      // while (left < right) {
      //   ... do something here ...
      //   left += s1;  // stride of left is s1 (s1 > 0)
      //   right += s2; // stride of right is s2 (s2 < 0)
      // }
      //
      const SCEV *RHSStart = RHSAddRec->getStart();
      const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*this);

      // If Stride - RHSStride is positive and does not overflow, we can write
      // the backedge count as:
      //   ceil((End - Start) /u (Stride - RHSStride))
      //   where End = max(RHSStart, Start)
      //
      // Check if RHSStride < 0 and Stride - RHSStride will not overflow.
      if (isKnownNegative(RHSStride) &&
          willNotOverflow(Instruction::Sub, /*Signed=*/true, Stride,
                          RHSStride)) {
        const SCEV *Denominator = getMinusSCEV(Stride, RHSStride);
        if (isKnownPositive(Denominator)) {
          End = IsSigned ? getSMaxExpr(RHSStart, Start)
                         : getUMaxExpr(RHSStart, Start);

          // We can do this because End >= Start, as End = max(RHSStart,
          // Start).
          const SCEV *Delta = getMinusSCEV(End, Start);

          BECount = getUDivCeilSCEV(Delta, Denominator);
          BECountIfBackedgeTaken =
              getUDivCeilSCEV(getMinusSCEV(RHSStart, Start), Denominator);
        }
      }
    }
    if (BECount == nullptr) {
      // If we cannot calculate ExactBECount, we can calculate the MaxBECount,
      // given the start, stride and max value for the end bound of the
      // loop (RHS), and the fact that IV does not overflow (which is
      // checked above).
      const SCEV *MaxBECount = computeMaxBECountForLT(
          Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
      return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                       MaxBECount, false /*MaxOrZero*/, Predicates);
    }
  } else {
    // We use the expression (max(End,Start)-Start)/Stride to describe the
    // backedge count, as if the backedge is taken at least once
    // max(End,Start) is End and so the result is as above, and if not
    // max(End,Start) is Start so we get a backedge count of zero.
    auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
    assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
    assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
    assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
    // Can we prove (max(RHS,Start) > Start - Stride)?
    if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
        isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
      // In this case, we can use a refined formula for computing backedge
      // taken count. The general formula remains:
      //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
      // We want to use the alternate formula:
      //   "((End - 1) - (Start - Stride)) /u Stride"
      // Let's do a quick case analysis to show these are equivalent under
      // our precondition that max(RHS,Start) > Start - Stride.
      // * For RHS <= Start, the backedge-taken count must be zero.
      //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
      //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
      //   "Stride - 1 /u Stride" which is indeed zero for all non-zero values
      //   of Stride. For 0 stride, we've used umax(Stride, 1) above, reducing
      //   this to the stride of 1 case.
      // * For RHS >= Start, the backedge count must be "RHS-Start /uceil
      //   Stride".
      //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
      //   "((RHS - 1) - (Start - Stride)) /u Stride" which reassociates to
      //   "((RHS - (Start - Stride) - 1) /u Stride".
      //   Our preconditions trivially imply no overflow in that form.
      const SCEV *MinusOne = getMinusOne(Stride->getType());
      const SCEV *Numerator =
          getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
      BECount = getUDivExpr(Numerator, Stride);
    }

    if (!BECount) {
      auto canProveRHSGreaterThanEqualStart = [&]() {
        auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
        const SCEV *GuardedRHS = applyLoopGuards(OrigRHS, L);
        const SCEV *GuardedStart = applyLoopGuards(OrigStart, L);

        if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart) ||
            isKnownPredicate(CondGE, GuardedRHS, GuardedStart))
          return true;

        // (RHS > Start - 1) implies RHS >= Start.
        // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
        //   "Start - 1" doesn't overflow.
        // * For signed comparison, if Start - 1 does overflow, it's equal
        //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
        // * For unsigned comparison, if Start - 1 does overflow, it's equal
        //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
        //
        // FIXME: Should isLoopEntryGuardedByCond do this for us?
        auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
        auto *StartMinusOne =
            getAddExpr(OrigStart, getMinusOne(OrigStart->getType()));
        return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
      };

      // If we know that RHS >= Start in the context of loop, then we know
      // that max(RHS, Start) = RHS at this point.
      if (canProveRHSGreaterThanEqualStart()) {
        End = RHS;
      } else {
        // If RHS < Start, the backedge will be taken zero times. So in
        // general, we can write the backedge-taken count as:
        //
        //   RHS >= Start ? ceil(RHS - Start) / Stride : 0
        //
        // We convert it to the following to make it more convenient for SCEV:
        //
        //   ceil(max(RHS, Start) - Start) / Stride
        End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

        // See what would happen if we assume the backedge is taken. This is
        // used to compute MaxBECount.
        BECountIfBackedgeTaken =
            getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
      }

      // At this point, we know:
      //
      // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
      // 2. The index variable doesn't overflow.
      //
      // Therefore, we know N exists such that
      // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
      // doesn't overflow.
      //
      // Using this information, try to prove whether the addition in
      // "(Start - End) + (Stride - 1)" has unsigned overflow.
      const SCEV *One = getOne(Stride->getType());
      bool MayAddOverflow = [&] {
        if (isKnownToBeAPowerOfTwo(Stride)) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers. Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
          // Therefore, UMAX mod Stride == Stride - 1. So we can write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride + 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride + 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX"
          // to use signed max instead of unsigned max. Note that we're
          // trying to prove a lack of unsigned overflow in either case.
          return false;
        }
        if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
          // If Start is equal to Stride, (End - Start) + (Stride - 1) ==
          // End - 1. If !IsSigned, 0 <u Stride == Start <=u End; so
          // 0 <u End - 1 <u End. If IsSigned, 0 <s Stride == Start <=s End;
          // so 0 <s End - 1 <s End.
          //
          // If Start is equal to Stride - 1, (End - Start) + Stride - 1 ==
          // End.
          return false;
        }
        return true;
      }();

      const SCEV *Delta = getMinusSCEV(End, Start);
      if (!MayAddOverflow) {
        // floor((D + (S - 1)) / S)
        // We prefer this formulation if it's legal because it's fewer
        // operations.
        BECount =
            getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
      } else {
        BECount = getUDivCeilSCEV(Delta, Stride);
      }
    }
  }

  const SCEV *ConstantMaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount)) {
    ConstantMaxBECount = BECount;
  } else if (BECountIfBackedgeTaken &&
             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    ConstantMaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    ConstantMaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));

  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
                   Predicates);
}

ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
    const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned,
    bool ControlsOnlyExit, bool AllowPredicates) {
  SmallVector<const SCEVPredicate *> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // undefined behavior, as in C.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (End->getType()->isPointerTy()) {
    End = getLosslessPtrToIntExpr(End);
    if (isa<SCEVCouldNotCompute>(End))
      return End;
  }

  // Compute ((Start - End) + (Stride - 1)) / Stride.
  // FIXME: This can overflow. Holding off on fixing this for now;
  // howManyGreaterThans will hopefully be gone soon.
  const SCEV *One = getOne(Stride->getType());
  const SCEV *BECount = getUDivExpr(
      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *ConstantMaxBECount =
      isa<SCEVConstant>(BECount)
          ? BECount
          : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
                            getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
    ConstantMaxBECount = BECount;
  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;

  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
                   Predicates);
}
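
// Shapes handled by the two functions above, with numbers chosen purely for
// illustration: for an increasing IV {1,+,1} exiting on "IV <u 10",
// howManyLessThans' refined formula gives ((10 - 1) - (1 - 1)) /u 1 == 9
// backedges; for a decreasing IV {10,+,-2} exiting on "IV >s 0" (so
// Stride == 2 after negation), howManyGreaterThans gives
// ((10 - 0) + (2 - 1)) /u 2 == 5 backedges.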

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(*S);
  }

  return SE.getCouldNotCompute();
}
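
// For example, solving {0,+,3} in the range [0, 10): A == 3, End == 9,
// ExitVal == (9 + 3) /u 3 == 4; the chrec evaluates to 12 at iteration 4
// (outside the range) but to 9 at iteration 3 (inside), so the result is 4.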

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach an arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrency with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
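
// For example, the post-increment form of {0,+,1,+,1} (the triangular
// numbers 0, 1, 3, 6, ...) is {0+1,+,1+1,+,1} == {1,+,2,+,1}, which produces
// 1, 3, 6, 10, ... as required.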

// Return true when S contains at least an undef value.
bool ScalarEvolution::containsUndefs(const SCEV *S) const {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

// Return true when S contains a value that is a nullptr.
bool ScalarEvolution::containsErasedValue(const SCEV *S) const {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return SU->getValue() == nullptr;
    return false;
  });
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}
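
// For example, for "store i32 %v, ptr %p" this returns the SCEV for
// sizeof(i32) -- under a typical data layout, the constant 4 in the
// pointer-width integer type.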

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SE->forgetValue(getValPtr());
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), DL(F.getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction
  // in relevant basic blocks, and not just terminators. Doing this is a waste
  // of time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this
  // beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution
  // wants to _add_ guards to the module when there weren't any before, and
  // wants ScalarEvolution to optimize based on those guards. For now we
  // prefer to be efficient in lieu of being smart in that rather obscure
  // case.

  auto *GuardDecl = Intrinsic::getDeclarationIfExists(
      F.getParent(), Intrinsic::experimental_guard);
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
      DT(Arg.DT), LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      ConstantMultipleCache(std::move(Arg.ConstantMultipleCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      BECountUsers(std::move(Arg.BECountUsers)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      SCEVUsers(std::move(Arg.SCEVUsers)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

/// When printing a top-level SCEV for trip counts, it's helpful to include
/// a type for constants which are otherwise hard to disambiguate.
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S) {
  if (isa<SCEVConstant>(S))
    OS << *S->getType() << " ";
  OS << *S;
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  auto *BTC = SE->getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(BTC)) {
    OS << "backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, BTC);
  } else
    OS << "Unpredictable backedge-taken count.";
  OS << "\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << " exit count for " << ExitingBlock->getName() << ": ";
      const SCEV *EC = SE->getExitCount(L, ExitingBlock);
      PrintSCEVWithTypeHint(OS, EC);
      if (isa<SCEVCouldNotCompute>(EC)) {
        // Retry with predicates.
        SmallVector<const SCEVPredicate *> Predicates;
        EC = SE->getPredicatedExitCount(L, ExitingBlock, &Predicates);
        if (!isa<SCEVCouldNotCompute>(EC)) {
          OS << "\n predicated exit count for " << ExitingBlock->getName()
             << ": ";
          PrintSCEVWithTypeHint(OS, EC);
          OS << "\n Predicates:\n";
          for (const auto *P : Predicates)
            P->print(OS, 4);
        }
      }
      OS << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  auto *ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
    OS << "constant max backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, ConstantBTC);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable constant max backedge-taken count. ";
  }
  OS << "\n";

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  auto *SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
    OS << "symbolic max backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, SymbolicBTC);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable symbolic max backedge-taken count. ";
  }
  OS << "\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << " symbolic max exit count for " << ExitingBlock->getName()
         << ": ";
      auto *ExitBTC = SE->getExitCount(L, ExitingBlock,
                                       ScalarEvolution::SymbolicMaximum);
      PrintSCEVWithTypeHint(OS, ExitBTC);
      if (isa<SCEVCouldNotCompute>(ExitBTC)) {
        // Retry with predicates.
        SmallVector<const SCEVPredicate *> Predicates;
        ExitBTC = SE->getPredicatedExitCount(L, ExitingBlock, &Predicates,
                                             ScalarEvolution::SymbolicMaximum);
        if (!isa<SCEVCouldNotCompute>(ExitBTC)) {
          OS << "\n predicated symbolic max exit count for "
             << ExitingBlock->getName() << ": ";
          PrintSCEVWithTypeHint(OS, ExitBTC);
          OS << "\n Predicates:\n";
          for (const auto *P : Predicates)
            P->print(OS, 4);
        }
      }
      OS << "\n";
    }

  SmallVector<const SCEVPredicate *, 4> Preds;
  auto *PBT = SE->getPredicatedBackedgeTakenCount(L, Preds);
  if (PBT != BTC) {
    assert(!Preds.empty() && "Different predicated BTC, but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PBT)) {
      OS << "Predicated backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PBT);
    } else
      OS << "Unpredictable predicated backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }
  Preds.clear();

  auto *PredConstantMax =
      SE->getPredicatedConstantMaxBackedgeTakenCount(L, Preds);
  if (PredConstantMax != ConstantBTC) {
    assert(!Preds.empty() &&
           "different predicated constant max BTC but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PredConstantMax)) {
      OS << "Predicated constant max backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PredConstantMax);
    } else
      OS << "Unpredictable predicated constant max backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }
  Preds.clear();

  auto *PredSymbolicMax =
      SE->getPredicatedSymbolicMaxBackedgeTakenCount(L, Preds);
  if (SymbolicBTC != PredSymbolicMax) {
    assert(!Preds.empty() &&
           "Different predicated symbolic max BTC, but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PredSymbolicMax)) {
      OS << "Predicated symbolic max backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PredSymbolicMax);
    } else
      OS << "Unpredictable predicated symbolic max backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    OS << "Variant";
    break;
  case ScalarEvolution::LoopInvariant:
    OS << "Invariant";
    break;
  case ScalarEvolution::LoopComputable:
    OS << "Computable";
    break;
  }
  return OS;
}

raw_ostream &operator<<(raw_ostream &OS, ScalarEvolution::BlockDisposition BD) {
  switch (BD) {
  case ScalarEvolution::DoesNotDominateBlock:
    OS << "DoesNotDominate";
    break;
  case ScalarEvolution::DominatesBlock:
    OS << "Dominates";
    break;
  case ScalarEvolution::ProperlyDominatesBlock:
    OS << "ProperlyDominates";
    break;
  }
  return OS;
}

} // namespace llvm

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  if (ClassifyExpressions) {
    OS << "Classifying expressions for: ";
    F.printAsOperand(OS, /*PrintType=*/false);
    OS << "\n";
    for (Instruction &I : instructions(F))
      if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
        OS << I << '\n';
        OS << "  -->  ";
        const SCEV *SV = SE.getSCEV(&I);
        SV->print(OS);
        if (!isa<SCEVCouldNotCompute>(SV)) {
          OS << " U: ";
          SE.getUnsignedRange(SV).print(OS);
          OS << " S: ";
          SE.getSignedRange(SV).print(OS);
        }

        const Loop *L = LI.getLoopFor(I.getParent());

        const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
        if (AtUse != SV) {
          OS << "  -->  ";
          AtUse->print(OS);
          if (!isa<SCEVCouldNotCompute>(AtUse)) {
            OS << " U: ";
            SE.getUnsignedRange(AtUse).print(OS);
            OS << " S: ";
            SE.getSignedRange(AtUse).print(OS);
          }
        }

        if (L) {
          OS << "\t\t" "Exits: ";
          const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
          if (!SE.isLoopInvariant(ExitValue, L)) {
            OS << "<<Unknown>>";
          } else {
            OS << *ExitValue;
          }

          bool First = true;
          for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
            if (First) {
              OS << "\t\t" "LoopDispositions: { ";
              First = false;
            } else {
              OS << ", ";
            }

            Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": " << SE.getLoopDisposition(SV, Iter);
          }

          for (const auto *InnerL : depth_first(L)) {
            if (InnerL == L)
              continue;
            if (First) {
              OS << "\t\t" "LoopDispositions: { ";
              First = false;
            } else {
              OS << ", ";
            }

            InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": " << SE.getLoopDisposition(SV, InnerL);
          }

          OS << " }";
        }

        OS << "\n";
      }
  }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : llvm::reverse(Values2)) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return LoopInvariant;
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (const auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    bool HasVarying = false;
    for (const auto *Op : S->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are
    // loop invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
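
// For example, with nested loops Outer containing Inner: an addrec
// {0,+,1}<Outer> is LoopComputable w.r.t. Outer (it is Outer's own
// recurrence), and LoopInvariant w.r.t. Inner, because Outer -- the addrec's
// loop -- contains Inner.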

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : llvm::reverse(Values2)) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return ProperlyDominatesBlock;
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    [[fallthrough]];
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    bool Proper = true;
    for (const SCEV *NAryOp : S->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}
void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
                                                bool Predicated) {
  auto &BECounts =
      Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
  auto It = BECounts.find(L);
  if (It != BECounts.end()) {
    for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
      for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
        if (!isa<SCEVConstant>(S)) {
          auto UserIt = BECountUsers.find(S);
          assert(UserIt != BECountUsers.end());
          UserIt->second.erase({L, Predicated});
        }
      }
    }
    BECounts.erase(It);
  }
}
void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
  SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end());
  SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());

  while (!Worklist.empty()) {
    const SCEV *Curr = Worklist.pop_back_val();
    auto Users = SCEVUsers.find(Curr);
    if (Users != SCEVUsers.end())
      for (const auto *User : Users->second)
        if (ToForget.insert(User).second)
          Worklist.push_back(User);
  }

  for (const auto *S : ToForget)
    forgetMemoizedResultsImpl(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (ToForget.count(Entry.first))
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }
}
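// For illustration: if %a is forgotten and both (%a + %b) and ((%a + %b) * 2)
// were built on top of it, the worklist walk above forgets all three, because
// every non-constant SCEV records its users in SCEVUsers.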
void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  HasRecMap.erase(S);
  ConstantMultipleCache.erase(S);

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    UnsignedWrapViaInductionTried.erase(AR);
    SignedWrapViaInductionTried.erase(AR);
  }

  auto ExprIt = ExprValueMap.find(S);
  if (ExprIt != ExprValueMap.end()) {
    for (Value *V : ExprIt->second) {
      auto ValueIt = ValueExprMap.find_as(V);
      if (ValueIt != ValueExprMap.end())
        ValueExprMap.erase(ValueIt);
    }
    ExprValueMap.erase(ExprIt);
  }

  auto ScopeIt = ValuesAtScopes.find(S);
  if (ScopeIt != ValuesAtScopes.end()) {
    for (const auto &Pair : ScopeIt->second)
      if (!isa_and_nonnull<SCEVConstant>(Pair.second))
        llvm::erase(ValuesAtScopesUsers[Pair.second],
                    std::make_pair(Pair.first, S));
    ValuesAtScopes.erase(ScopeIt);
  }

  auto ScopeUserIt = ValuesAtScopesUsers.find(S);
  if (ScopeUserIt != ValuesAtScopesUsers.end()) {
    for (const auto &Pair : ScopeUserIt->second)
      llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
    ValuesAtScopesUsers.erase(ScopeUserIt);
  }

  auto BEUsersIt = BECountUsers.find(S);
  if (BEUsersIt != BECountUsers.end()) {
    // Work on a copy, as forgetBackedgeTakenCounts() will modify the original.
    auto Copy = BEUsersIt->second;
    for (const auto &Pair : Copy)
      forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
    BECountUsers.erase(BEUsersIt);
  }

  auto FoldUser = FoldCacheUser.find(S);
  if (FoldUser != FoldCacheUser.end())
    for (auto &KV : FoldUser->second)
      FoldCache.erase(KV);
  FoldCacheUser.erase(S);
}
void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}
void ScalarEvolution::getReachableBlocks(
    SmallPtrSetImpl<BasicBlock *> &Reachable, Function &F) {
  SmallVector<BasicBlock *> Worklist;
  Worklist.push_back(&F.getEntryBlock());
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    if (!Reachable.insert(BB).second)
      continue;

    Value *Cond;
    BasicBlock *TrueBB, *FalseBB;
    if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
                                        m_BasicBlock(FalseBB)))) {
      if (auto *C = dyn_cast<ConstantInt>(Cond)) {
        Worklist.push_back(C->isOne() ? TrueBB : FalseBB);
        continue;
      }

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        const SCEV *L = getSCEV(Cmp->getOperand(0));
        const SCEV *R = getSCEV(Cmp->getOperand(1));
        if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) {
          Worklist.push_back(TrueBB);
          continue;
        }
        if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L,
                                              R)) {
          Worklist.push_back(FalseBB);
          continue;
        }
      }
    }

    append_range(Worklist, successors(BB));
  }
}
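// E.g. a block ending in "br i1 false, label %t, label %f" contributes only
// %f to the worklist, and a branch on a compare that constant ranges prove
// always-true contributes only its taken successor; blocks reachable solely
// through such provably-dead edges never enter Reachable.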
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);
  SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
  SE2.getReachableBlocks(ReachableBlocks, F);

  auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
    if (containsUndefs(Old) || containsUndefs(New)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes a value go from "undef"
      // to "undef+1" (say). The transform is fine, since in both cases the
      // result is "undef", but SCEV thinks the value increased by 1.
      return nullptr;
    }

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    const SCEV *Delta = SE2.getMinusSCEV(Old, New);
    if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
      return nullptr;
    return Delta;
  };
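  // For illustration: if a pass rewrote a value whose SCEV was %n into %n + 1
  // without invalidating SCEV, GetDelta returns the non-zero constant -1 and
  // the checks below report the stale entry.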
  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    // Only verify BECounts in reachable loops. For an unreachable loop,
    // any BECount is legal.
    if (!ReachableBlocks.contains(L->getHeader()))
      continue;

    // Only verify cached BECounts. Computing new BECounts may change the
    // results of subsequent SCEV uses.
    auto It = BackedgeTakenCounts.find(L);
    if (It == BackedgeTakenCounts.end())
      continue;

    auto *CurBECount =
        SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = GetDelta(CurBECount, NewBECount);
    if (Delta && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }
  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.insert(L).second)
      Worklist.append(L->begin(), L->end());
  }
  for (const auto &KV : ValueExprMap) {
#ifndef NDEBUG
    // Check for SCEV expressions referencing invalid/deleted loops.
    if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
      assert(ValidLoops.contains(AR->getLoop()) &&
             "AddRec references invalid loop");
    }
#endif

    // Check that the value is also part of the reverse map.
    auto It = ExprValueMap.find(KV.second);
    if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
      dbgs() << "Value " << *KV.first
             << " is in ValueExprMap but not in ExprValueMap\n";
      std::abort();
    }

    if (auto *I = dyn_cast<Instruction>(&*KV.first)) {
      if (!ReachableBlocks.contains(I->getParent()))
        continue;
      const SCEV *OldSCEV = SCM.visit(KV.second);
      const SCEV *NewSCEV = SE2.getSCEV(I);
      const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
      if (Delta && !Delta->isZero()) {
        dbgs() << "SCEV for value " << *I << " changed!\n"
               << "Old: " << *OldSCEV << "\n"
               << "New: " << *NewSCEV << "\n"
               << "Delta: " << *Delta << "\n";
        std::abort();
      }
    }
  }
  for (const auto &KV : ExprValueMap) {
    for (Value *V : KV.second) {
      auto It = ValueExprMap.find_as(V);
      if (It == ValueExprMap.end()) {
        dbgs() << "Value " << *V
               << " is in ExprValueMap but not in ValueExprMap\n";
        std::abort();
      }
      if (It->second != KV.first) {
        dbgs() << "Value " << *V << " mapped to " << *It->second
               << " rather than " << *KV.first << "\n";
        std::abort();
      }
    }
  }
  // Verify integrity of SCEV users.
  for (const auto &S : UniqueSCEVs) {
    for (const auto *Op : S.operands()) {
      // We do not store dependencies of constants.
      if (isa<SCEVConstant>(Op))
        continue;
      auto It = SCEVUsers.find(Op);
      if (It != SCEVUsers.end() && It->second.count(&S))
        continue;
      dbgs() << "Use of operand " << *Op << " by user " << S
             << " is not being tracked!\n";
      std::abort();
    }
  }
  // Verify integrity of ValuesAtScopes users.
  for (const auto &ValueAndVec : ValuesAtScopes) {
    const SCEV *Value = ValueAndVec.first;
    for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
      const Loop *L = LoopAndValueAtScope.first;
      const SCEV *ValueAtScope = LoopAndValueAtScope.second;
      if (!isa<SCEVConstant>(ValueAtScope)) {
        auto It = ValuesAtScopesUsers.find(ValueAtScope);
        if (It != ValuesAtScopesUsers.end() &&
            is_contained(It->second, std::make_pair(L, Value)))
          continue;
        dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
               << *ValueAtScope << " missing in ValuesAtScopesUsers\n";
        std::abort();
      }
    }
  }
  for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
    const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
    for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
      const Loop *L = LoopAndValue.first;
      const SCEV *Value = LoopAndValue.second;
      assert(!isa<SCEVConstant>(Value));
      auto It = ValuesAtScopes.find(Value);
      if (It != ValuesAtScopes.end() &&
          is_contained(It->second, std::make_pair(L, ValueAtScope)))
        continue;
      dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
             << *ValueAtScope << " missing in ValuesAtScopes\n";
      std::abort();
    }
  }
  // Verify integrity of BECountUsers.
  auto VerifyBECountUsers = [&](bool Predicated) {
    auto &BECounts =
        Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
    for (const auto &LoopAndBEInfo : BECounts) {
      for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
        for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
          if (!isa<SCEVConstant>(S)) {
            auto UserIt = BECountUsers.find(S);
            if (UserIt != BECountUsers.end() &&
                UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
              continue;
            dbgs() << "Value " << *S << " for loop " << *LoopAndBEInfo.first
                   << " missing from BECountUsers\n";
            std::abort();
          }
        }
      }
    }
  };
  VerifyBECountUsers(/* Predicated */ false);
  VerifyBECountUsers(/* Predicated */ true);
  // Verify integrity of loop disposition cache.
  for (auto &[S, Values] : LoopDispositions) {
    for (auto [Loop, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
               << " is incorrect: cached " << CachedDisposition << ", actual "
               << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }
  // Verify integrity of the block disposition cache.
  for (auto &[S, Values] : BlockDispositions) {
    for (auto [BB, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for block %"
               << BB->getName() << " is incorrect: cached " << CachedDisposition
               << ", actual " << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }
  // Verify FoldCache/FoldCacheUser caches.
  for (auto [FoldID, Expr] : FoldCache) {
    auto I = FoldCacheUser.find(Expr);
    if (I == FoldCacheUser.end()) {
      dbgs() << "Missing entry in FoldCacheUser for cached expression " << *Expr
             << "!\n";
      std::abort();
    }
    if (!is_contained(I->second, FoldID)) {
      dbgs() << "Missing FoldID in cached users of " << *Expr << "!\n";
      std::abort();
    }
  }
  for (auto [Expr, IDs] : FoldCacheUser) {
    for (auto &FoldID : IDs) {
      auto I = FoldCache.find(FoldID);
      if (I == FoldCache.end()) {
        dbgs() << "Missing entry in FoldCache for expression " << *Expr
               << "!\n";
        std::abort();
      }
      if (I->second != Expr) {
        dbgs() << "Entry in FoldCache doesn't match FoldCacheUser: "
               << *I->second << " != " << *Expr << "!\n";
        std::abort();
      }
    }
  }
  // Verify that ConstantMultipleCache computations are correct. We check that
  // cached multiples and recomputed multiples are multiples of each other to
  // verify correctness. It is possible that a recomputed multiple is different
  // from the cached multiple due to strengthened no wrap flags or changes in
  // KnownBits computations.
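  // E.g. a cached multiple of 4 with a recomputed multiple of 8 passes the
  // check below (8 urem 4 == 0), while 4 vs. 6 fails, since neither divides
  // the other.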
  for (auto [S, Multiple] : ConstantMultipleCache) {
    APInt RecomputedMultiple = SE2.getConstantMultiple(S);
    if ((Multiple != 0 && RecomputedMultiple != 0 &&
         Multiple.urem(RecomputedMultiple) != 0 &&
         RecomputedMultiple.urem(Multiple) != 0)) {
      dbgs() << "Incorrect cached computation in ConstantMultipleCache for "
             << *S << " : Computed " << RecomputedMultiple
             << " but cache contains " << Multiple << "!\n";
      std::abort();
    }
  }
}
bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}
AnalysisKey ScalarEvolutionAnalysis::Key;
ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  return ScalarEvolution(F, TLI, AC, DT, LI);
}
PreservedAnalyses
ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).verify();
  return PreservedAnalyses::all();
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  // For compatibility with opt's -analyze feature under legacy pass manager
  // which was not ported to NPM. This keeps tests using
  // update_analyze_test_checks.py working.
  OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
     << F.getName() << "':\n";
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}
void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}
void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS);
}
const SCEVPredicate *
ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Compare);
  ID.AddInteger(Pred);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVComparePredicate *Eq = new (SCEVAllocator)
      SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}
const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}
namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallVectorImpl<const SCEVPredicate *> *NewPreds,
                             const SCEVPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
        for (const auto *Pred : U->getPredicates())
          if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
            if (IPred->getLHS() == Expr &&
                IPred->getPredicate() == ICmpInst::ICMP_EQ)
              return IPred->getRHS();
      } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
        if (IPred->getLHS() == Expr &&
            IPred->getPredicate() == ICmpInst::ICMP_EQ)
          return IPred->getRHS();
      }
    }
    return convertToAddRecWithPreds(Expr);
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }
private:
  explicit SCEVPredicateRewriter(
      const Loop *L, ScalarEvolution &SE,
      SmallVectorImpl<const SCEVPredicate *> *NewPreds,
      const SCEVPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->push_back(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    std::optional<
        std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (const auto *P : PredicatedRewrite->second){
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        if (L != WP->getExpr()->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallVectorImpl<const SCEVPredicate *> *NewPreds;
  const SCEVPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace
const SCEV *
ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                       const SCEVPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallVectorImpl<const SCEVPredicate *> &Preds) {
  SmallVector<const SCEVPredicate *> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  Preds.append(TransformPreds.begin(), TransformPreds.end());

  return AddRec;
}
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
                                           const ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}
bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVComparePredicate>(N);

  if (!Op)
    return false;

  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVComparePredicate::isAlwaysTrue() const { return false; }

void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
  if (Pred == ICmpInst::ICMP_EQ)
    OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
  else
    OS.indent(Depth) << "Compare predicate: " << *LHS << " " << Pred << ") "
                     << *RHS << "\n";
}
SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }
bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}
bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}
void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
14984 SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef
<const SCEVPredicate
*> Preds
)
14985 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union
) {
14986 for (const auto *P
: Preds
)
bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}
bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  return any_of(Preds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}
void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (const auto *Pred : Preds)
    Pred->print(OS, Depth);
}
void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (const auto *Pred : Set->Preds)
      add(Pred);
    return;
  }

  // Only add predicate if it is not already implied by this union predicate.
  if (implies(N))
    return;

  Preds.push_back(N);
}
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {
  SmallVector<const SCEVPredicate *, 4> Empty;
  Preds = std::make_unique<SCEVUnionPredicate>(Empty);
}
void ScalarEvolution::registerUser(const SCEV *User,
                                   ArrayRef<const SCEV *> Ops) {
  for (const auto *Op : Ops)
    // We do not expect that forgetting cached data for SCEVConstants will ever
    // open any prospects for sharpening or introduce any correctness issues,
    // so we don't bother storing their dependencies.
    if (!isa<SCEVConstant>(Op))
      SCEVUsers[Op].insert(User);
}
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}
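// Note on the generation scheme above: addPredicate() bumps Generation, so a
// cached entry {OldGeneration, Rewritten} is treated as stale and gets
// re-rewritten against the enlarged predicate set on the next query.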
const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SmallVector<const SCEVPredicate *, 4> Preds;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
    for (const auto *P : Preds)
      addPredicate(*P);
  }
  return BackedgeCount;
}
const SCEV *PredicatedScalarEvolution::getSymbolicMaxBackedgeTakenCount() {
  if (!SymbolicMaxBackedgeCount) {
    SmallVector<const SCEVPredicate *, 4> Preds;
    SymbolicMaxBackedgeCount =
        SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
    for (const auto *P : Preds)
      addPredicate(*P);
  }
  return SymbolicMaxBackedgeCount;
}
unsigned PredicatedScalarEvolution::getSmallConstantMaxTripCount() {
  if (!SmallConstantMaxTripCount) {
    SmallVector<const SCEVPredicate *, 4> Preds;
    SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
    for (const auto *P : Preds)
      addPredicate(*P);
  }
  return *SmallConstantMaxTripCount;
}
void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds->implies(&Pred))
    return;

  SmallVector<const SCEVPredicate *, 4> NewPreds(Preds->getPredicates());
  NewPreds.push_back(&Pred);
  Preds = std::make_unique<SCEVUnionPredicate>(NewPreds);
  updateGeneration();
}

const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const {
  return *Preds;
}
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
    }
  }
}
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}
bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallVector<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (const auto *P : NewPreds)
    addPredicate(*P);

  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}
PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L),
      Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
// 4, A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  if (Expr->getType()->isPointerTy())
    return false;

  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
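// For illustration: with A = %n and B = 4, the add (%n + (-4 * (%n /u 4)))
// matches and yields LHS = %n, RHS = 4, i.e. %n urem 4. The zext/trunc form
// (zext i2 (trunc i32 %n to i2) to i32) is likewise recognized as %n urem 4.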
ScalarEvolution::LoopGuards
ScalarEvolution::LoopGuards::collect(const Loop *L, ScalarEvolution &SE) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Pred = L->getLoopPredecessor();
  LoopGuards Guards(SE);
  SmallPtrSet<const BasicBlock *, 8> VisitedBlocks;
  collectFromBlock(SE, Guards, Header, Pred, VisitedBlocks);
  return Guards;
}
void ScalarEvolution::LoopGuards::collectFromPHI(
    ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
    const PHINode &Phi, SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks,
    SmallDenseMap<const BasicBlock *, LoopGuards> &IncomingGuards,
    unsigned Depth) {
  if (!SE.isSCEVable(Phi.getType()))
    return;

  using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
  auto GetMinMaxConst = [&](unsigned IncomingIdx) -> MinMaxPattern {
    const BasicBlock *InBlock = Phi.getIncomingBlock(IncomingIdx);
    if (!VisitedBlocks.insert(InBlock).second)
      return {nullptr, scCouldNotCompute};
    auto [G, Inserted] = IncomingGuards.try_emplace(InBlock, LoopGuards(SE));
    if (Inserted)
      collectFromBlock(SE, G->second, Phi.getParent(), InBlock, VisitedBlocks,
                       Depth + 1);
    auto &RewriteMap = G->second.RewriteMap;
    if (RewriteMap.empty())
      return {nullptr, scCouldNotCompute};
    auto S = RewriteMap.find(SE.getSCEV(Phi.getIncomingValue(IncomingIdx)));
    if (S == RewriteMap.end())
      return {nullptr, scCouldNotCompute};
    auto *SM = dyn_cast_if_present<SCEVMinMaxExpr>(S->second);
    if (!SM)
      return {nullptr, scCouldNotCompute};
    if (const SCEVConstant *C0 = dyn_cast<SCEVConstant>(SM->getOperand(0)))
      return {C0, SM->getSCEVType()};
    return {nullptr, scCouldNotCompute};
  };
  auto MergeMinMaxConst = [](MinMaxPattern P1,
                             MinMaxPattern P2) -> MinMaxPattern {
    auto [C1, T1] = P1;
    auto [C2, T2] = P2;
    if (!C1 || !C2 || T1 != T2)
      return {nullptr, scCouldNotCompute};
    switch (T1) {
    case scUMaxExpr:
      return {C1->getAPInt().ult(C2->getAPInt()) ? C1 : C2, T1};
    case scSMaxExpr:
      return {C1->getAPInt().slt(C2->getAPInt()) ? C1 : C2, T1};
    case scUMinExpr:
      return {C1->getAPInt().ugt(C2->getAPInt()) ? C1 : C2, T1};
    case scSMinExpr:
      return {C1->getAPInt().sgt(C2->getAPInt()) ? C1 : C2, T1};
    default:
      llvm_unreachable("Trying to merge non-MinMaxExpr SCEVs.");
    }
  };
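  // E.g. merging umax(3, ...) from one incoming block with umax(5, ...) from
  // another keeps the constant 3: only the weaker bound holds on both paths.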
  auto P = GetMinMaxConst(0);
  for (unsigned int In = 1; In < Phi.getNumIncomingValues(); In++) {
    if (!P.first)
      break;
    P = MergeMinMaxConst(P, GetMinMaxConst(In));
  }
  if (P.first) {
    const SCEV *LHS = SE.getSCEV(const_cast<PHINode *>(&Phi));
    SmallVector<const SCEV *, 2> Ops({P.first, LHS});
    const SCEV *RHS = SE.getMinMaxExpr(P.second, Ops);
    Guards.RewriteMap.insert({LHS, RHS});
  }
}
void ScalarEvolution::LoopGuards::collectFromBlock(
    ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
    const BasicBlock *Block, const BasicBlock *Pred,
    SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks, unsigned Depth) {
  SmallVector<const SCEV *> ExprsToRewrite;
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS,
                              DenseMap<const SCEV *, const SCEV *>
                                  &RewriteMap) {
    // WARNING: It is generally unsound to apply any wrap flags to the proposed
    // replacement SCEV which isn't directly implied by the structure of that
    // SCEV. In particular, using contextual facts to imply flags is *NOT*
    // legal. See the scoping rules for flags in the header to understand why.

    // If LHS is a constant, apply information to the other expression.
    if (isa<SCEVConstant>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1) and
    // (X >=u C1).
    auto MatchRangeCheckIdiom = [&SE, Predicate, LHS, RHS, &RewriteMap,
                                 &ExprsToRewrite]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown);
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown] = SE.getUMaxExpr(
          SE.getConstant(ExactRegion.getUnsignedMin()),
          SE.getUMinExpr(RewrittenLHS,
                         SE.getConstant(ExactRegion.getUnsignedMax())));
      ExprsToRewrite.push_back(LHSUnknown);
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;
    // Return true if \p Expr is a MinMax SCEV expression with a non-negative
    // constant operand. If so, return in \p SCTy the SCEV type and in \p RHS
    // the non-constant operand and in \p LHS the constant operand.
    auto IsMinMaxSCEVWithNonNegativeConstant =
        [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
            const SCEV *&RHS) {
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
            if (MinMax->getNumOperands() != 2)
              return false;
            if (auto *C = dyn_cast<SCEVConstant>(MinMax->getOperand(0))) {
              if (C->getAPInt().isNegative())
                return false;
              SCTy = MinMax->getSCEVType();
              LHS = MinMax->getOperand(0);
              RHS = MinMax->getOperand(1);
              return true;
            }
          }
          return false;
        };
    // Checks whether Expr is a non-negative constant, and Divisor is a positive
    // constant, and returns their APInt in ExprVal and in DivisorVal.
    auto GetNonNegExprAndPosDivisor = [&](const SCEV *Expr, const SCEV *Divisor,
                                          APInt &ExprVal, APInt &DivisorVal) {
      auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
      auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
      if (!ConstExpr || !ConstDivisor)
        return false;
      ExprVal = ConstExpr->getAPInt();
      DivisorVal = ConstDivisor->getAPInt();
      return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
    };
    // Return a new SCEV that modifies \p Expr to the closest number that
    // divides by \p Divisor and is greater than or equal to Expr.
    // For now, only handle constant Expr and Divisor.
    auto GetNextSCEVDividesByDivisor = [&](const SCEV *Expr,
                                           const SCEV *Divisor) {
      APInt ExprVal;
      APInt DivisorVal;
      if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
        return Expr;
      APInt Rem = ExprVal.urem(DivisorVal);
      if (!Rem.isZero())
        // return the SCEV: Expr + Divisor - Expr % Divisor
        return SE.getConstant(ExprVal + DivisorVal - Rem);
      return Expr;
    };
    // Return a new SCEV that modifies \p Expr to the closest number that
    // divides by \p Divisor and is less than or equal to Expr.
    // For now, only handle constant Expr and Divisor.
    auto GetPreviousSCEVDividesByDivisor = [&](const SCEV *Expr,
                                               const SCEV *Divisor) {
      APInt ExprVal;
      APInt DivisorVal;
      if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
        return Expr;
      APInt Rem = ExprVal.urem(DivisorVal);
      // return the SCEV: Expr - Expr % Divisor
      return SE.getConstant(ExprVal - Rem);
    };
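    // E.g. with Expr = 5 and Divisor = 8: GetNextSCEVDividesByDivisor yields 8
    // (5 + 8 - 5 urem 8) and GetPreviousSCEVDividesByDivisor yields 0
    // (5 - 5 urem 8).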
    // Apply divisibility by \p Divisor on MinMaxExpr with constant values,
    // recursively. This is done by aligning up/down the constant value to the
    // Divisor.
    std::function<const SCEV *(const SCEV *, const SCEV *)>
        ApplyDivisibiltyOnMinMaxExpr = [&](const SCEV *MinMaxExpr,
                                           const SCEV *Divisor) {
          const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
          SCEVTypes SCTy;
          if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
                                                   MinMaxRHS))
            return MinMaxExpr;
          auto IsMin =
              isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
          assert(SE.isKnownNonNegative(MinMaxLHS) &&
                 "Expected non-negative operand!");
          auto *DivisibleExpr =
              IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
                    : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
          SmallVector<const SCEV *> Ops = {
              ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
          return SE.getMinMaxExpr(SCTy, Ops);
        };
    // If we have LHS == 0, check if LHS is computing a property of some unknown
    // SCEV %v which we can rewrite %v to express explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (SE.matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          auto I = RewriteMap.find(LHSUnknown);
          const SCEV *RewrittenLHS =
              I != RewriteMap.end() ? I->second : LHSUnknown;
          RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
          const auto *Multiple =
              SE.getMulExpr(SE.getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
          RewriteMap[LHSUnknown] = Multiple;
          ExprsToRewrite.push_back(LHSUnknown);
          return;
        }
      }
    }
    // Do not apply information for constants or if RHS contains an AddRec.
    if (isa<SCEVConstant>(LHS) || SE.containsAddRecurrence(RHS))
      return;

    // If RHS is SCEVUnknown, make sure the information is applied to it.
    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    // Puts rewrite rule \p From -> \p To into the rewrite map. Also if \p From
    // and \p FromRewritten are the same (i.e. there has been no rewrite
    // registered for \p From), then puts this value in the list of rewritten
    // expressions.
    auto AddRewrite = [&](const SCEV *From, const SCEV *FromRewritten,
                          const SCEV *To) {
      if (From == FromRewritten)
        ExprsToRewrite.push_back(From);
      RewriteMap[From] = To;
    };
    // Checks whether \p S has already been rewritten. In that case returns the
    // existing rewrite because we want to chain further rewrites onto the
    // already rewritten value. Otherwise returns \p S.
    auto GetMaybeRewritten = [&](const SCEV *S) {
      auto I = RewriteMap.find(S);
      return I != RewriteMap.end() ? I->second : S;
    };
    // Check for the SCEV expression (A /u B) * B while B is a constant, inside
    // \p Expr. The check is done recursively on \p Expr, which is assumed to
    // be a composition of Min/Max SCEVs. Return whether the SCEV expression (A
    // /u B) * B was found, and return the divisor B in \p DividesBy. For
    // example, if Expr = umin (umax ((A /u 8) * 8, 16), 64), return true since
    // (A /u 8) * 8 matched the pattern, and return the constant SCEV 8 in \p
    // DividesBy.
    std::function<bool(const SCEV *, const SCEV *&)> HasDivisibiltyInfo =
        [&](const SCEV *Expr, const SCEV *&DividesBy) {
          if (auto *Mul = dyn_cast<SCEVMulExpr>(Expr)) {
            if (Mul->getNumOperands() != 2)
              return false;
            auto *MulLHS = Mul->getOperand(0);
            auto *MulRHS = Mul->getOperand(1);
            if (isa<SCEVConstant>(MulLHS))
              std::swap(MulLHS, MulRHS);
            if (auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
              if (Div->getOperand(1) == MulRHS) {
                DividesBy = MulRHS;
                return true;
              }
          }
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
            return HasDivisibiltyInfo(MinMax->getOperand(0), DividesBy) ||
                   HasDivisibiltyInfo(MinMax->getOperand(1), DividesBy);
          return false;
        };
    // Return true if Expr is known to divide by \p DividesBy.
    std::function<bool(const SCEV *, const SCEV *&)> IsKnownToDivideBy =
        [&](const SCEV *Expr, const SCEV *DividesBy) {
          if (SE.getURemExpr(Expr, DividesBy)->isZero())
            return true;
          if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
            return IsKnownToDivideBy(MinMax->getOperand(0), DividesBy) &&
                   IsKnownToDivideBy(MinMax->getOperand(1), DividesBy);
          return false;
        };

    const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
    const SCEV *DividesBy = nullptr;
    if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
      // Check that the whole expression is divided by DividesBy
      DividesBy =
          IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy : nullptr;
    // Collect rewrites for LHS and its transitive operands based on the
    // condition.
    // For min/max expressions, also apply the guard to its operands:
    // 'min(a, b) >= c' -> '(a >= c) and (b >= c)',
    // 'min(a, b) > c'  -> '(a > c) and (b > c)',
    // 'max(a, b) <= c' -> '(a <= c) and (b <= c)',
    // 'max(a, b) < c'  -> '(a < c) and (b < c)'.

    // We cannot express strict predicates in SCEV, so instead we replace them
    // with non-strict ones against plus or minus one of RHS depending on the
    // predicate.
    const SCEV *One = SE.getOne(RHS->getType());
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      if (RHS->getType()->isPointerTy())
        return;
      RHS = SE.getUMaxExpr(RHS, One);
      [[fallthrough]];
    case CmpInst::ICMP_SLT: {
      RHS = SE.getMinusSCEV(RHS, One);
      RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    }
    case CmpInst::ICMP_UGT:
    case CmpInst::ICMP_SGT:
      RHS = SE.getAddExpr(RHS, One);
      RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
      RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
      RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) : RHS;
      break;
    default:
      break;
    }
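    // E.g. the guard "%x u< 8" is handled as "%x u<= 7": RHS becomes
    // umax(8, 1) - 1 = 7, and %x is then rewritten below to umin(%x, 7).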
    SmallVector<const SCEV *, 16> Worklist(1, LHS);
    SmallPtrSet<const SCEV *, 16> Visited;

    auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
      append_range(Worklist, S->operands());
    };

    while (!Worklist.empty()) {
      const SCEV *From = Worklist.pop_back_val();
      if (isa<SCEVConstant>(From))
        continue;
      if (!Visited.insert(From).second)
        continue;
      const SCEV *FromRewritten = GetMaybeRewritten(From);
      const SCEV *To = nullptr;

      switch (Predicate) {
      case CmpInst::ICMP_ULT:
      case CmpInst::ICMP_ULE:
        To = SE.getUMinExpr(FromRewritten, RHS);
        if (auto *UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
          EnqueueOperands(UMax);
        break;
      case CmpInst::ICMP_SLT:
      case CmpInst::ICMP_SLE:
        To = SE.getSMinExpr(FromRewritten, RHS);
        if (auto *SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
          EnqueueOperands(SMax);
        break;
      case CmpInst::ICMP_UGT:
      case CmpInst::ICMP_UGE:
        To = SE.getUMaxExpr(FromRewritten, RHS);
        if (auto *UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
          EnqueueOperands(UMin);
        break;
      case CmpInst::ICMP_SGT:
      case CmpInst::ICMP_SGE:
        To = SE.getSMaxExpr(FromRewritten, RHS);
        if (auto *SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
          EnqueueOperands(SMin);
        break;
      case CmpInst::ICMP_EQ:
        if (isa<SCEVConstant>(RHS))
          To = RHS;
        break;
      case CmpInst::ICMP_NE:
        if (isa<SCEVConstant>(RHS) &&
            cast<SCEVConstant>(RHS)->getValue()->isNullValue()) {
          const SCEV *OneAlignedUp =
              DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
          To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
        }
        break;
      default:
        break;
      }

      if (To)
        AddRewrite(From, FromRewritten, To);
    }
  };
  SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
  // First, collect information from assumptions dominating the loop.
  for (auto &AssumeVH : SE.AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    if (!SE.DT.dominates(AssumeI, Block))
      continue;
    Terms.emplace_back(AssumeI->getOperand(0), true);
  }

  // Second, collect information from llvm.experimental.guards dominating the loop.
  auto *GuardDecl = Intrinsic::getDeclarationIfExists(
      SE.F.getParent(), Intrinsic::experimental_guard);
  if (GuardDecl)
    for (const auto *GU : GuardDecl->users())
      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
        if (Guard->getFunction() == Block->getParent() &&
            SE.DT.dominates(Guard, Block))
          Terms.emplace_back(Guard->getArgOperand(0), true);
  // Third, collect conditions from dominating branches. Starting at the loop
  // predecessor, climb up the predecessor chain, as long as there are
  // predecessors that can be found that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred, Block);
  for (; Pair.first;
       Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    VisitedBlocks.insert(Pair.second);
    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    Terms.emplace_back(LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) == Pair.second);

    // If we are recursively collecting guards stop after 2
    // predecessors to limit compile-time impact for now.
    if (Depth > 0 && Terms.size() == 2)
      break;
  }
  // Finally, if we stopped climbing the predecessor chain because
  // there wasn't a unique one to continue, try to collect conditions
  // for PHINodes by recursively following all of their incoming
  // blocks and try to merge the found conditions to build a new one
  // for the Phi.
  if (Pair.second->hasNPredecessorsOrMore(2) &&
      Depth < MaxLoopGuardCollectionDepth) {
    SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
    for (auto &Phi : Pair.second->phis())
      collectFromPHI(SE, Guards, Phi, VisitedBlocks, IncomingGuards, Depth);
  }
  // Now apply the information from the collected conditions to
  // Guards.RewriteMap. Conditions are processed in reverse order, so the
  // earliest condition is processed first. This ensures the SCEVs with the
  // shortest dependency chains are constructed first.
  for (auto [Term, EnterIfTrue] : reverse(Terms)) {
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(Term);
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        const auto *LHS = SE.getSCEV(Cmp->getOperand(0));
        const auto *RHS = SE.getSCEV(Cmp->getOperand(1));
        CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }
  // Let the rewriter preserve NUW/NSW flags if the unsigned/signed ranges of
  // the replacement expressions are contained in the ranges of the replaced
  // expressions.
  Guards.PreserveNUW = true;
  Guards.PreserveNSW = true;
  for (const SCEV *Expr : ExprsToRewrite) {
    const SCEV *RewriteTo = Guards.RewriteMap[Expr];
    Guards.PreserveNUW &=
        SE.getUnsignedRange(Expr).contains(SE.getUnsignedRange(RewriteTo));
    Guards.PreserveNSW &=
        SE.getSignedRange(Expr).contains(SE.getSignedRange(RewriteTo));
  }
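  // Illustrative check (assumed): rewriting %x with unsigned range [0, 100)
  // to (%x umin 9) with range [0, 10) keeps the range contained, so NUW can
  // be preserved; any rewrite that might widen a range clears the flag for
  // the whole map.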
  // Now that all rewrite information is collected, rewrite the collected
  // expressions with the information in the map. This applies information to
  // sub-expressions.
  if (ExprsToRewrite.size() > 1) {
    for (const SCEV *Expr : ExprsToRewrite) {
      const SCEV *RewriteTo = Guards.RewriteMap[Expr];
      Guards.RewriteMap.erase(Expr);
      Guards.RewriteMap.insert({Expr, Guards.rewrite(RewriteTo)});
    }
  }
}
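// Illustrative fixpoint (assumed): with RewriteMap = {%a -> (1 + %b),
// %b -> 5}, re-rewriting the entry for %a yields {%a -> 6, %b -> 5}, so a
// later rewrite of %a needs no second lookup.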
const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
  /// A rewriter to replace SCEV expressions in Map with the corresponding
  /// entry in the map. It skips AddRecExpr because we cannot guarantee that
  /// the replacement is loop invariant in the loop of the AddRec.
  class SCEVLoopGuardRewriter
      : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
    const DenseMap<const SCEV *, const SCEV *> &Map;

    SCEV::NoWrapFlags FlagMask = SCEV::FlagAnyWrap;

  public:
    SCEVLoopGuardRewriter(ScalarEvolution &SE,
                          const ScalarEvolution::LoopGuards &Guards)
        : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap) {
      if (Guards.PreserveNUW)
        FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
      if (Guards.PreserveNSW)
        FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNSW);
    }
    const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      auto I = Map.find(Expr);
      if (I == Map.end())
        return Expr;
      return I->second;
    }
    const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
      auto I = Map.find(Expr);
      if (I == Map.end()) {
        // If we didn't find the exact ZExt expr in the map, check if there's
        // an entry for a smaller ZExt we can use instead.
        Type *Ty = Expr->getType();
        const SCEV *Op = Expr->getOperand(0);
        unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
        while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
               Bitwidth > Op->getType()->getScalarSizeInBits()) {
          Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
          auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
          auto I = Map.find(NarrowExt);
          if (I != Map.end())
            return SE.getZeroExtendExpr(I->second, Ty);
          Bitwidth = Bitwidth / 2;
        }

        return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
            Expr);
      }
      return I->second;
    }
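    // Illustrative lookup for the narrowing above (assumed): if the query is
    // (zext i8 %x to i32) and the map only has (zext i8 %x to i16) -> 5, the
    // loop finds the i16 entry at Bitwidth 16 and returns
    // (zext i16 5 to i32), which folds to the i32 constant 5.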
    const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
      auto I = Map.find(Expr);
      if (I == Map.end())
        return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
            Expr);
      return I->second;
    }
    const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
      auto I = Map.find(Expr);
      if (I == Map.end())
        return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
      return I->second;
    }

    const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
      auto I = Map.find(Expr);
      if (I == Map.end())
        return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
      return I->second;
    }
    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(
            SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
        Changed |= Op != Operands.back();
      }
      // We are only replacing operands with equivalent values, so transfer the
      // flags from the original expression.
      return !Changed ? Expr
                      : SE.getAddExpr(Operands,
                                      ScalarEvolution::maskFlags(
                                          Expr->getNoWrapFlags(), FlagMask));
    }
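    // Illustrative flag handling (assumed): rewriting (%a + %b)<nuw> keeps
    // <nuw> only if PreserveNUW held for all map entries; maskFlags simply
    // drops every flag not present in FlagMask.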
    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (const auto *Op : Expr->operands()) {
        Operands.push_back(
            SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visit(Op));
        Changed |= Op != Operands.back();
      }
      // We are only replacing operands with equivalent values, so transfer the
      // flags from the original expression.
      return !Changed ? Expr
                      : SE.getMulExpr(Operands,
                                      ScalarEvolution::maskFlags(
                                          Expr->getNoWrapFlags(), FlagMask));
    }
  };

  if (RewriteMap.empty())
    return Expr;

  SCEVLoopGuardRewriter Rewriter(SE, *this);
  return Rewriter.visit(Expr);
}
const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  return applyLoopGuards(Expr, LoopGuards::collect(L, *this));
}

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr,
                                             const LoopGuards &Guards) {
  return Guards.rewrite(Expr);
}