//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
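
// Because all three overloads funnel constants through the UniqueSCEVs
// folding set above, equal inputs yield the same object, so clients may
// compare SCEVs by pointer. A minimal sketch (assuming an in-scope
// ScalarEvolution &SE):
//
//   const SCEV *A = SE.getConstant(APInt(32, 42));
//   const SCEV *B = SE.getConstant(APInt(32, 42));
//   assert(A == B && "uniqued SCEVs compare equal by pointer");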
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}
bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}
bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a,  %a)
///
///   %c = f(%x, %y)
///   %d = f(%c, %c)
///
///   %e = f(%b, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}
// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// memoized.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we go take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
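
// For example (illustrative, relying on the SCEVTypes ordering in which
// constants sort before unknowns): an operand list (%x, 4, %x, 2) is
// reordered to (2, 4, %x, %x), so the duplicate %x operands end up adjacent
// and the folders below can spot repeated operands with one linear scan.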
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}
namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case when N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//
/// Compute BC(It, K).  The result has width W.  Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // machine width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
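
// A worked instance of the scheme above (illustrative): for K = 3,
// K! = 6 = 2^1 * 3, so T = 1 and OddFactorial = 3.  The product
// It*(It-1)*(It-2) is formed at W+1 bits, divided by DivFactor = 2^1,
// truncated back to W bits, and multiplied by the multiplicative inverse of
// 3 mod 2^W -- yielding It*(It-1)*(It-2)/6 exactly, modulo 2^W.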
/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
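
// Concretely (illustrative): for the affine recurrence {A,+,B} the value at
// iteration It is A + B*BC(It, 1) = A + B*It, and for the quadratic
// {A,+,B,+,C} it is A + B*It + C*It*(It-1)/2.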
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during recursion and different modification ID was inserted
    // into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
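
// For example (illustrative): truncating (zext i8 %x to i64) to i16 takes
// the trunc(zext(x)) path above and folds to (zext i8 %x to i16), while
// truncating the add recurrence {0,+,4} simply truncates both operands.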
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
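
// The traits above let the extension logic below be written once for both
// sign- and zero-extension; a call through a trait looks like (a sketch):
//
//   const SCEV *E =
//       (SE->*ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr)(S, Ty, D);
//
// which dispatches to ScalarEvolution::getSignExtendExpr(S, Ty, D).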
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
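
// For example (illustrative): for AR = {(2 + %step),+,%step}, the quick
// difference above strips one %step from the start expression, giving
// PreStart = 2.  AR is then treated as the post-increment sibling of
// {2,+,%step}, and if one of checks (1)-(3) succeeds, the extension of
// AR's start can be normalized as Ext(%step) + Ext(2).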
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}
// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
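
// A small worked example (illustrative): if C = 37 (0b100101) and every
// non-constant operand contributes at least 3 trailing zero bits, then
// TZ = 3 and D = C mod 2^3 = 5.  The residual (C - D + x + y + ...) =
// (32 + x + y + ...) keeps 3 trailing zeros, and re-adding D = 5 only fills
// in those low bits, so the top-level addition cannot carry and cannot wrap.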
1592 ScalarEvolution::getZeroExtendExpr(const SCEV
*Op
, Type
*Ty
, unsigned Depth
) {
1593 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
1594 "This is not an extending conversion!");
1595 assert(isSCEVable(Ty
) &&
1596 "This is not a conversion to a SCEVable type!");
1597 Ty
= getEffectiveSCEVType(Ty
);
1599 // Fold if the operand is constant.
1600 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
1602 cast
<ConstantInt
>(ConstantExpr::getZExt(SC
->getValue(), Ty
)));
1604 // zext(zext(x)) --> zext(x)
1605 if (const SCEVZeroExtendExpr
*SZ
= dyn_cast
<SCEVZeroExtendExpr
>(Op
))
1606 return getZeroExtendExpr(SZ
->getOperand(), Ty
, Depth
+ 1);
1608 // Before doing any expensive analysis, check to see if we've already
1609 // computed a SCEV for this Op and Ty.
1610 FoldingSetNodeID ID
;
1611 ID
.AddInteger(scZeroExtend
);
1615 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1616 if (Depth
> MaxExtDepth
) {
1617 SCEV
*S
= new (SCEVAllocator
) SCEVZeroExtendExpr(ID
.Intern(SCEVAllocator
),
1619 UniqueSCEVs
.InsertNode(S
, IP
);
1620 addToLoopUseLists(S
);
1624 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1625 if (const SCEVTruncateExpr
*ST
= dyn_cast
<SCEVTruncateExpr
>(Op
)) {
1626 // It's possible the bits taken off by the truncate were all zero bits. If
1627 // so, we should be able to simplify this further.
1628 const SCEV
*X
= ST
->getOperand();
1629 ConstantRange CR
= getUnsignedRange(X
);
1630 unsigned TruncBits
= getTypeSizeInBits(ST
->getType());
1631 unsigned NewBits
= getTypeSizeInBits(Ty
);
1632 if (CR
.truncate(TruncBits
).zeroExtend(NewBits
).contains(
1633 CR
.zextOrTrunc(NewBits
)))
1634 return getTruncateOrZeroExtend(X
, Ty
);
1637 // If the input value is a chrec scev, and we can prove that the value
1638 // did not overflow the old, smaller, value, we can zero extend all of the
1639 // operands (often constants). This allows analysis of something like
1640 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1641 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(Op
))
1642 if (AR
->isAffine()) {
1643 const SCEV
*Start
= AR
->getStart();
1644 const SCEV
*Step
= AR
->getStepRecurrence(*this);
1645 unsigned BitWidth
= getTypeSizeInBits(AR
->getType());
1646 const Loop
*L
= AR
->getLoop();
1648 if (!AR
->hasNoUnsignedWrap()) {
1649 auto NewFlags
= proveNoWrapViaConstantRanges(AR
);
1650 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(NewFlags
);
1653 // If we have special knowledge that this addrec won't overflow,
1654 // we don't need to do any further analysis.
1655 if (AR
->hasNoUnsignedWrap())
1656 return getAddRecExpr(
1657 getExtendAddRecStart
<SCEVZeroExtendExpr
>(AR
, Ty
, this, Depth
+ 1),
1658 getZeroExtendExpr(Step
, Ty
, Depth
+ 1), L
, AR
->getNoWrapFlags());
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }
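      // For instance, for the i8 recurrence {0,+,1} with MaxBECount = 100,
      // ZAdd and OperandExtendedAdd both evaluate to 100 when redone in the
      // doubled-width type i16, so the final value provably fits in i8 and
      // NUW can be cached on AR.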
      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }
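      // For example, an i8 addrec {0,+,1} whose backedge is guarded by
      // "iv ult 100" can be shown to stay strictly below the wrap limit
      // N = 0 - 1 = 255, so the positive-step branch above marks it NUW.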
      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }
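  // To make the constant split above concrete: zext i8 {7,+,8} to i32 can
  // become zext(7) + zext i8 {0,+,8} to i32, i.e. 7 + {0,+,8}, provided
  // the residual recurrence can be proven not to wrap.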
  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }
  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Often address arithmetic contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
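  // Concretely, zext i8 (5 + 4 * X) to i32 becomes 1 + zext i8 (4 + 4 * X)
  // to i32 here (D = 1), which lets later passes compare such address
  // expressions modulo a small constant offset.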
  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }
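  // For example with K = 3: zext i16 (8 * (trunc X to i16)) to i32 becomes
  // 8 * (zext i13 (trunc X to i13) to i32), since the multiply by 8 shifts
  // the top three truncated bits out anyway.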
  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }
  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }
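  // For instance, if the signed range of an i32 value X is known to be
  // [-100, 100], then sext i16 (trunc i32 X to i16) to i32 is simply X:
  // the truncated-off bits were all copies of the sign bit.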
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }
      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }
      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }
  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }
  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;
  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const SmallVectorImpl<const SCEV *> &Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}
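// For example, given (X + 20) where the signed range of X is known to be
// [0, 100), the i32 add cannot sign-overflow, so the NSW region check above
// upgrades the flags even when the IR carried no nsw annotation.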
bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}
/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }
  // Limit recursion calls depth.
  if (Depth > MaxArithDepth)
    return getOrCreateAddExpr(Ops, Flags);
  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {  //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);
  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }
  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }
  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }
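  // As a simple instance of the rescaling above, 2*X + 2*Y is regrouped by
  // the common scale 2 and re-emitted as 2*(X + Y).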
  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }
      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }
  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(SmallVectorImpl<const SCEV *> &Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}
/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we take the i-th term of the numerator and divide by
  // the i-th term of the denominator.  This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}
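// For instance, Choose(6, 2, Overflow) evaluates r = 1*6/1 = 6, then
// r = 6*5/2 = 15, matching C(6,2) without any intermediate overflow.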
/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}
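// e.g. this returns true for (3 + X) * Y, but false for (X udiv 3): the
// traversal only descends through add and mul nodes, so the constant inside
// the udiv is never visited.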
/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth)
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    if (Ops.size() == 2)
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);
    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }
  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }
  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //   ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
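    // In the simplest affine case this reduces to the familiar identity
    //     {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>,
    // e.g. squaring the induction variable, {0,+,1} * {0,+,1}, yields
    // {0,+,1,+,2}, whose value at iteration n is n + n*(n-1) = n^2.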
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize)
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        const SCEV *Term = getZero(Ty);
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
                                               SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
          }
        }
        AddRecOps.push_back(Term);
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}
/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}
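// For instance, X urem 8 is emitted as zext(trunc X to i3) back to X's own
// type: the remainder modulo a power of two is just the low bits.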
/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                               // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the LHS
      // expression.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
3196 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3197 if (const SCEVMulExpr
*M
= dyn_cast
<SCEVMulExpr
>(LHS
)) {
3198 SmallVector
<const SCEV
*, 4> Operands
;
3199 for (const SCEV
*Op
: M
->operands())
3200 Operands
.push_back(getZeroExtendExpr(Op
, ExtTy
));
3201 if (getZeroExtendExpr(M
, ExtTy
) == getMulExpr(Operands
))
3202 // Find an operand that's safely divisible.
3203 for (unsigned i
= 0, e
= M
->getNumOperands(); i
!= e
; ++i
) {
3204 const SCEV
*Op
= M
->getOperand(i
);
3205 const SCEV
*Div
= getUDivExpr(Op
, RHSC
);
3206 if (!isa
<SCEVUDivExpr
>(Div
) && getMulExpr(Div
, RHSC
) == Op
) {
3207 Operands
= SmallVector
<const SCEV
*, 4>(M
->op_begin(),
3210 return getMulExpr(Operands
);
3215 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3216 if (const SCEVUDivExpr
*OtherDiv
= dyn_cast
<SCEVUDivExpr
>(LHS
)) {
3217 if (auto *DivisorConstant
=
3218 dyn_cast
<SCEVConstant
>(OtherDiv
->getRHS())) {
3219 bool Overflow
= false;
3221 DivisorConstant
->getAPInt().umul_ov(RHSC
->getAPInt(), Overflow
);
3223 return getConstant(RHSC
->getType(), 0, false);
3225 return getUDivExpr(OtherDiv
->getLHS(), getConstant(NewRHS
));
3229 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3230 if (const SCEVAddExpr
*A
= dyn_cast
<SCEVAddExpr
>(LHS
)) {
3231 SmallVector
<const SCEV
*, 4> Operands
;
3232 for (const SCEV
*Op
: A
->operands())
3233 Operands
.push_back(getZeroExtendExpr(Op
, ExtTy
));
3234 if (getZeroExtendExpr(A
, ExtTy
) == getAddExpr(Operands
)) {
3236 for (unsigned i
= 0, e
= A
->getNumOperands(); i
!= e
; ++i
) {
3237 const SCEV
*Op
= getUDivExpr(A
->getOperand(i
), RHS
);
3238 if (isa
<SCEVUDivExpr
>(Op
) ||
3239 getMulExpr(Op
, RHS
) != A
->getOperand(i
))
3241 Operands
.push_back(Op
);
3243 if (Operands
.size() == A
->getNumOperands())
3244 return getAddExpr(Operands
);
3248 // Fold if both operands are constant.
3249 if (const SCEVConstant
*LHSC
= dyn_cast
<SCEVConstant
>(LHS
)) {
3250 Constant
*LHSCV
= LHSC
->getValue();
3251 Constant
*RHSCV
= RHSC
->getValue();
3252 return getConstant(cast
<ConstantInt
>(ConstantExpr::getUDiv(LHSCV
,
3258 FoldingSetNodeID ID
;
3259 ID
.AddInteger(scUDivExpr
);
3263 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
3264 SCEV
*S
= new (SCEVAllocator
) SCEVUDivExpr(ID
.Intern(SCEVAllocator
),
3266 UniqueSCEVs
.InsertNode(S
, IP
);
3267 addToLoopUseLists(S
);
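// Worked example of the {X,+,N}/C fold above: {0,+,8}<%loop> /u 4 becomes
// {0,+,2}<%loop>, provided the zero-extend comparison proves that dividing
// each operand is safe; the step 8 is evenly divisible by 4.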
static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
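// For example, gcd of the constants 12 and 8 is 4. getUDivExactExpr below
// uses this to strip the largest common factor from both sides of an exact
// division.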
/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}
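// Worked example: for (6 * %x)<nuw> /u 4, the constants 6 and 4 differ, so
// gcd(6, 4) == 2 is factored out and the expression is rewritten to
// (3 * %x) /u 2; since no remaining multiply operand equals the divisor, the
// result falls back to a plain udiv of the simplified operands.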
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}
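// For example, if Step is itself the recurrence {Y,+,Z} for the same loop L,
// the code above flattens getAddRecExpr(X, {Y,+,Z}, L) into {X,+,Y,+,Z}:
// a recurrence whose step itself evolves linearly with the loop.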
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount count here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs in by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}
const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do on CurTy is getting
  // its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}
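// For example, "getelementptr inbounds [10 x i32], [10 x i32]* %p, i64 1,
// i64 %i" yields %p + (40 + 4 * %i): the first index is scaled by the 40-byte
// array size, the second by the 4-byte element size, and the inbounds flag
// lets the multiply and the final add carry <nsw>.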
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X smax Y smax Y  -->  X smax Y
    //  X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X umax Y umax Y  -->  X umax Y
    //  X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning(
                                    ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) {
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i; --e;
    } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i],
                                               Ops[i + 1])) {
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  // ~smax(~x, ~y, ~z) == smin(x, y, z).
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getSMaxExpr(NotOps));
}
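// The identity above follows from ~x == -1 - x being order-reversing:
// x <= y iff ~x >= ~y, so smin(x, y) == ~smax(~x, ~y). The same argument
// justifies the unsigned variant used by getUMinExpr below.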
const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // ~umax(~x, ~y, ~z) == umin(x, y, z).
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getUMaxExpr(NotOps));
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}
/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}
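// For example, splitAddExpr applied to (42 + %x) yields {%x, 42} (constants
// sort first in a canonical SCEVAddExpr), while any expression that is not a
// two-operand add with a leading constant comes back as {S, nullptr}.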
/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}
/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}
/// Check whether a value has nuw/nsw/exact set but its SCEV does not.
/// TODO: In reality it is better to check poison recursively,
/// but this is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}
/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S is inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}
/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}
/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
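// Concrete illustration of the hazard above, for i8: if RHS could be -128
// (the minimum signed value M), then (-1)*RHS wraps back to -128, so NSW on
// LHS - RHS carries over to the negated form only when RHS != M is provable
// or LHS is known non-negative.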
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}
/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}
void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}
namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression when its loop is L. If its loop is not L, then use the AddRec
/// itself when IgnoreOtherLoops is true; otherwise the rewrite cannot be
/// done. If the SCEV contains a non-invariant unknown SCEV, the rewrite
/// cannot be done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
/// increment expression when its loop is L; if its loop is not L, use the
/// AddRec itself. If the SCEV contains a non-invariant unknown SCEV, the
/// rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If value matches the backedge condition for loop latch,
  // then return a constant evolution node based on loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace
SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
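// For instance, if the addrec's signed range is [0, 100) and its step is the
// constant 1 on i8, every value in [0, 100) can be incremented by 1 without
// signed wrap, so the region check above succeeds and FlagNSW is inferred
// without any new analysis of the loop.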
namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace
/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
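    // For example, for i32 the sign mask is 0x80000000, and for every %x
    //   %x xor 0x80000000 == %x + 0x80000000 (mod 2^32),
    // so modeling the xor as an add loses no information.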
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);
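  // For example, the case above turns (%x lshr 3) on i32 into (%x udiv 8):
  // the shift amount 3 selects the one-bit constant 1 << 3.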
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // The same reasoning as sadd/uadd above.
        if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
                        CI->getArgOperand(1));
      default:
        break;
      }
    break;
  }

  default:
    break;
  }

  return None;
}
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
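// For example, with %phi of type i64 and a backedge update containing the
// operand (sext i32 (trunc i64 %phi to i32) to i64), isSimpleCastedPHI
// returns the truncation type i32 and sets Signed to true.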
static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}
4637 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4638 // computation that updates the phi follows the following pattern:
4639 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4640 // which correspond to a phi->trunc->sext/zext->add->phi update chain.
4641 // If so, try to see if it can be rewritten as an AddRecExpr under some
4642 // Predicates. If successful, return them as a pair. Also cache the results
4645 // Example usage scenario:
4646 // Say the Rewriter is called for the following SCEV:
4647 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4649 // %X = phi i64 (%Start, %BEValue)
4650 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4651 // and call this function with %SymbolicPHI = %X.
4653 // The analysis will find that the value coming around the backedge has
4654 // the following SCEV:
4655 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4656 // Upon concluding that this matches the desired pattern, the function
4657 // will return the pair {NewAddRec, SmallPredsVec} where:
4658 // NewAddRec = {%Start,+,%Step}
4659 // SmallPredsVec = {P1, P2, P3} as follows:
4660 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4661 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4662 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4663 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4664 // under the predicates {P1,P2,P3}.
4665 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4666 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
4670 // 1) Extend the Induction descriptor to also support inductions that involve
4671 // casts: When needed (namely, when we are called in the context of the
4672 // vectorizer induction analysis), a Set of cast instructions will be
4673 // populated by this method, and provided back to isInductionPHI. This is
4674 // needed to allow the vectorizer to properly record them to be ignored by
4675 // the cost model and to avoid vectorizing them (otherwise these casts,
4676 // which are redundant under the runtime overflow checks, will be
4677 // vectorized, which can be costly).
4679 // 2) Support additional induction/PHISCEV patterns: We also want to support
4680 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4681 // after the induction update operation (the induction increment):
4683 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4684 // which correspond to a phi->add->trunc->sext/zext->phi update chain.
4686 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4687 // which correspond to a phi->trunc->add->sext/zext->phi update chain.
4689 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4690 Optional
<std::pair
<const SCEV
*, SmallVector
<const SCEVPredicate
*, 3>>>
4691 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown
*SymbolicPHI
) {
4692 SmallVector
<const SCEVPredicate
*, 3> Predicates
;
4694 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4695 // return an AddRec expression under some predicate.
4697 auto *PN
= cast
<PHINode
>(SymbolicPHI
->getValue());
4698 const Loop
*L
= isIntegerLoopHeaderPHI(PN
, LI
);
4699 assert(L
&& "Expecting an integer loop header phi");
4701 // The loop may have multiple entrances or multiple exits; we can analyze
4702 // this phi as an addrec if it has a unique entry value and a unique
4704 Value
*BEValueV
= nullptr, *StartValueV
= nullptr;
4705 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
4706 Value
*V
= PN
->getIncomingValue(i
);
4707 if (L
->contains(PN
->getIncomingBlock(i
))) {
4710 } else if (BEValueV
!= V
) {
4714 } else if (!StartValueV
) {
4716 } else if (StartValueV
!= V
) {
4717 StartValueV
= nullptr;
4721 if (!BEValueV
|| !StartValueV
)
4724 const SCEV
*BEValue
= getSCEV(BEValueV
);
4726 // If the value coming around the backedge is an add with the symbolic
4727 // value we just inserted, possibly with casts that we can ignore under
4728 // an appropriate runtime guard, then we found a simple induction variable!
4729 const auto *Add
= dyn_cast
<SCEVAddExpr
>(BEValue
);
4733 // If there is a single occurrence of the symbolic value, possibly
4734 // casted, replace it with a recurrence.
4735 unsigned FoundIndex
= Add
->getNumOperands();
4736 Type
*TruncTy
= nullptr;
4738 for (unsigned i
= 0, e
= Add
->getNumOperands(); i
!= e
; ++i
)
4740 isSimpleCastedPHI(Add
->getOperand(i
), SymbolicPHI
, Signed
, *this)))
4741 if (FoundIndex
== e
) {
4746 if (FoundIndex
== Add
->getNumOperands())
4749 // Create an add with everything but the specified operand.
4750 SmallVector
<const SCEV
*, 8> Ops
;
4751 for (unsigned i
= 0, e
= Add
->getNumOperands(); i
!= e
; ++i
)
4752 if (i
!= FoundIndex
)
4753 Ops
.push_back(Add
->getOperand(i
));
4754 const SCEV
*Accum
= getAddExpr(Ops
);
4756 // The runtime checks will not be valid if the step amount is
4757 // varying inside the loop.
4758 if (!isLoopInvariant(Accum
, L
))
  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //   Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                          :: from step 3
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //     + Accum                                              :: from P3
  //
  //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //     + Accum                          :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:
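//
// A small concrete illustration (hypothetical numbers): with ix = i8,
// iy = i32, Start = 0 and Accum = 1, P2 and P3 hold trivially (0 and 1
// survive the trunc/ext round-trip), and P1 becomes the runtime check that
// the truncated i8 recurrence {0,+,1} does not wrap during the first n
// iterations (e.g. n <= 128 in the signed case).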
  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // e.g.: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }
  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };
  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };
  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }
  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);
  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // the predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
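///
/// For example (an illustrative IR sketch):
///   %iv      = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
///   %iv.next = add nuw nsw i64 %iv, 1
/// is handled here and modeled as the AddRec {0,+,1}<nuw><nsw>.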
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value (SymbolicName).
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }
        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
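  // E.g. (illustrative) with %cmp = icmp sgt i32 %a, %b, the select
  //   %s = select i1 %cmp, i32 %a, i32 %b
  // is modeled as smax(%a, %b).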
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
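///
/// For example (an illustrative sketch):
///   %a = getelementptr i32, i32* %p, i64 %i
/// is modeled as ((4 * %i) + %p), assuming a 4-byte i32.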
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operand results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operand results.
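    // E.g. for (%x * 24): 24 = 0b11000 contributes 3 trailing zero bits, so
    // the product has at least GetMinTrailingZeros(%x) + 3 of them (capped
    // at the type's bit width).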
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operand results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operand results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operand results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
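/// For example (illustrative): a load tagged with
///   !range !{i32 0, i32 10}
/// yields the ConstantRange [0, 10).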
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRangeRef(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)));

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt(BitWidth, 0),
                          APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth),
                          APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
    // later if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(ConstantRange(Known.One,
                                                           ~Known.Zero + 1));
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
    }

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cyclic Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. The
// Signed argument defines whether we treat Step as signed or unsigned.
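//
// For example (illustrative numbers): with Step = 2, StartRange = [0, 10)
// and MaxBECount = 3, the value can grow by at most 2 * 3 = 6, so the
// resulting range is [0, 16).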
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. Maximum value of the final range will match the maximal
  // value of StartRange if the expression is decreasing and will be increased
  // by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
  if (NewLower == NewUpper)
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})
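  //
  // E.g. (illustrative): for Start = (%c ? 0 : 10) and Step = (%c ? 1 : 2),
  // the result is range({0,+,1}) union range({10,+,2}).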

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases where
    // that will help over what getRange already does, though.
    return ConstantRange(BitWidth, /* isFullSet = */ true);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop we
  // need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check we can do cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfFullPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison as K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.
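  //
  // An illustrative sketch (hypothetical IR): if
  //   %iv.next = add nsw i32 %iv, 1
  //   %cond = icmp slt i32 %iv.next, %n
  //   br i1 %cond, label %loop, label %exit
  // is the only exit test and the loop body is otherwise free of side
  // effects, poison in %iv.next makes the behavior undefined either way,
  // so the nsw flag may be applied to the corresponding add recurrence.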

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(V);
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getZero(V->getType());
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }
    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
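      // E.g. for i32 %x, (%x & 255) is modeled as
      // (zext i8 (trunc i32 %x to i8) to i32).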
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV *, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                                  IntegerType::get(getContext(),
                                                   BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;
    case Instruction::Or:
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          const SCEV *S = getAddExpr(LHS, getSCEV(CI));
          // If the LHS of the add was an addrec and it has no-wrap flags,
          // transfer the no-wrap flags, since an or won't introduce a wrap.
          if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
            const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
            const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
                OldAR->getNoWrapFlags());
          }
          return S;
        }
      }
      break;
:
6305 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->RHS
)) {
6306 // If the RHS of xor is -1, then this is a not operation.
6307 if (CI
->isMinusOne())
6308 return getNotSCEV(getSCEV(BO
->LHS
));
6310 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
6311 // This is a variant of the check for xor with -1, and it handles
6312 // the case where instcombine has trimmed non-demanded bits out
6313 // of an xor with -1.
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits. Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
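      //
      // For example, "shl i32 %x, 3" becomes (8 * %x); when the operand
      // carries nuw/nsw and the shift amount is below BitWidth - 1, the
      // multiply inherits the corresponding no-wrap flags.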
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // It is currently not resolved how to interpret NSW for left
        // shift by BitWidth - 1, so we avoid applying flags in that
        // case. Remove this check (or this comment) once the situation
        // is resolved. See
        // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
        // and http://reviews.llvm.org/D8890 .
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op && SA->getValue().ult(BitWidth - 1))
          Flags = getNoWrapFlagsFromUB(BO->Op);

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.
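        //
        // For example, on i32, "(shl %x, 24) ashr 24" is the sext-inreg idiom
        // and becomes (sext i8 (trunc %x to i8) to i32); with n = 28, m = 24
        // it becomes (sext i8 (16 * (trunc %x to i8)) to i32).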

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)),
                OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
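  // For example, a backedge-taken count of 41 yields a trip count of 42; a
  // count of 2^32 - 1 wraps the unsigned addition to 0, i.e. "no usable
  // constant trip count".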
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply a
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
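///
/// For example, if the trip count expression is (4 * %n), the returned
/// multiple is 4; if the trip count is the constant 12, the result is 12
/// itself.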
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Get the expression for the number of loop iterations for which this loop
/// is guaranteed not to exit via ExitingBlock. Otherwise return
/// SCEVCouldNotCompute.
const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock) {
  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getExact(L, this);
}

/// Similar to getBackedgeTakenCount, except return the least SCEV value that
/// is known never to be less than the actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getMax(this);
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a product build, statistics are unused.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the progress of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //
      //   loop0:
      //     %pn0 = phi
      //     ...
      //
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge taken counts use the SCEV
      // expression for %v. If we don't have the early stop below then in
      // cases like the above, getBackedgeTakenInfo(loop1) will clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) will clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}

void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  forgetLoop(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop
/// test is never skipped. This is a valid assumption as long as the loop
/// exits via that test. For precise results, it is the caller's
/// responsibility to specify the relevant loop exiting block using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate the loop's latch, so the
  // exact trip count is simply a minimum out of all these calculated exit
  // counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getMax();
}

bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
                                                    ScalarEvolution *SE) const {
  if (getMax() && getMax() != SE->getCouldNotCompute() &&
      SE->hasOperand(getMax(), S))
    return true;

  for (auto &ENT : ExitNotTaken)
    if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
        SE->hasOperand(ENT.ExactNotTaken, S))
      return true;

  return false;
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExactNotTaken(E), MaxNotTaken(E) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
        &&ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
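///
/// For example, in a loop with two exiting blocks where the one dominating
/// the latch has a max exit count of 10 and a side exit has 20, the
/// must-exit bound wins and the loop's MaxBECount is 10; if no exit
/// dominates the latch, the bounds are combined with umax instead.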
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];
    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
    // is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  TerminatorInst *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = !ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
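        //
        // For example, if the loop runs while "i < n && i < m" (exiting as
        // soon as either comparison fails), the exact backedge-taken count
        // is umin(count(i < n), count(i < m)).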
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and
      // EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be false at the same time for the loop to
        // exit. For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken
  // count. Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard
  // way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on
  // false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
          computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Pred) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
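    // For example, "for (i = 0; i != 100; ++i)" becomes
    // howFarToZero({-100,+,1}), which yields an exact backedge-taken count
    // of 100.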
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L,
                                      OriginalPred);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
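///
/// For example, for "i = 0; while (table[i]) ++i;" over a constant global
/// array, the loop below folds table[0], table[1], ... and returns the index
/// of the first zero element as the exit iteration count.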
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
  LoadInst *LI,
  Constant *RHS,
  const Loop *L,
  ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant *> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i - 2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted
  // in a loop of the form:
  //
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if we matched one.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.
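  //
  // For example, for
  //
  //   %iv = phi i32 [ %start, %preheader ], [ %iv.shr, %loop ]
  //   %iv.shr = lshr i32 %iv, 1
  //
  // with the backedge guarded by "%iv != 0", %iv stabilizes to 0 and the
  // guard is false there, so the backedge can execute at most 32 times (the
  // bit width) even though the exact count stays unknown.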

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI. If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant *> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}

// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
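///
/// For example, for a header phi "%p = phi i32 [ 1, %ph ], [ %d, %latch ]"
/// with "%d = mul i32 %p, 2" and a backedge-taken count of 5, the symbolic
/// execution below folds the recurrence to the constant 32.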
7808 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode
*PN
,
7811 auto I
= ConstantEvolutionLoopExitValue
.find(PN
);
7812 if (I
!= ConstantEvolutionLoopExitValue
.end())
7815 if (BEs
.ugt(MaxBruteForceIterations
))
7816 return ConstantEvolutionLoopExitValue
[PN
] = nullptr; // Not going to evaluate it.
7818 Constant
*&RetVal
= ConstantEvolutionLoopExitValue
[PN
];
7820 DenseMap
<Instruction
*, Constant
*> CurrentIterVals
;
7821 BasicBlock
*Header
= L
->getHeader();
7822 assert(PN
->getParent() == Header
&& "Can't evaluate PHI not in loop header!");
7824 BasicBlock
*Latch
= L
->getLoopLatch();
7828 for (PHINode
&PHI
: Header
->phis()) {
7829 if (auto *StartCST
= getOtherIncomingValue(&PHI
, Latch
))
7830 CurrentIterVals
[&PHI
] = StartCST
;
7832 if (!CurrentIterVals
.count(PN
))
7833 return RetVal
= nullptr;
7835 Value
*BEValue
= PN
->getIncomingValueForBlock(Latch
);
7837 // Execute the loop symbolically to determine the exit value.
7838 assert(BEs
.getActiveBits() < CHAR_BIT
* sizeof(unsigned) &&
7839 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
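
// Worked example (for illustration only; values are hypothetical, not taken
// from the code above): for a loop "for (i = 0, s = 5; i != 3; ++i) s *= 2"
// the backedge-taken count BEs is 3, the PHI for 's' starts at 5 and evolves
// through 10 and 20, and after the third symbolic iteration the map holds
// s == 40, which is returned as the constant exit value of the PHI.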

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue; // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
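
// Worked example (for illustration; hypothetical IR, assuming the exit test
// compares the PHI directly): if a PHI evolves through 1, 2, 4, 8 and Cond is
// "icmp eq %phi, 8" with ExitWhen = true, the condition folds to false on
// iterations 0..2 and to true on iteration 3, so the function returns an
// exit count of 3.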

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface. That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes. We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
      break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = this->LI[I->getParent()];
      if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node. Check
            // to see if the loop that contains it has a known backedge-taken
            // count. If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                    dyn_cast<SCEVConstant>(BackedgeTakenCount)) {

              // This trivial case can show up in some degenerate cases where
              // the incoming IR has not yet been fully simplified.
              if (BTCC->getValue()->isZero()) {
                Value *InitValue = nullptr;
                bool MultipleInitValues = false;
                for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                  if (!LI->contains(PN->getIncomingBlock(i))) {
                    if (!InitValue)
                      InitValue = PN->getIncomingValue(i);
                    else if (InitValue != PN->getIncomingValue(i)) {
                      MultipleInitValues = true;
                      break;
                    }
                  }
                }
                if (!MultipleInitValues && InitValue)
                  return getSCEV(InitValue);
              }

              // Okay, we know how many times the containing loop executes. If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);                // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //    I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //    (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
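
// Worked example (for illustration; numbers chosen here, not from the code):
// solve 6*X = 4 (mod 16) with BW = 4. D = gcd(6, 16) = 2 (Mult2 = 1), and
// B = 4 has two trailing zeros, so a solution exists. AD = 6/2 = 3,
// Mod = 16/2 = 8, and I = 3^-1 (mod 8) = 3. The result is
// (I*B mod N)/D = (3*4 mod 16)/2 = 6, and indeed 6*6 = 36 = 4 (mod 16),
// with no smaller unsigned solution.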

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.
  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}
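
// Worked example (for illustration; values chosen here): for the addrec
// {1,+,3,+,2} we have L = 1, M = 3, N = 2, so after n iterations
//   Acc = 1 + 3n + n(n-1)/2 * 2 = n^2 + 2n + 1 = (n+1)^2.
// The returned coefficients are A = N = 2, B = 2M - N = 4, C = 2L = 2, i.e.
// the equation 2n^2 + 4n + 2 = 2*(n+1)^2, with the common multiplier T = 2.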

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist; in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}
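
// Worked example (for illustration; values chosen here): for {-2,+,0,+,2}
// the accumulated value is c(n) = -2 + n(n-1) = n^2 - n - 2 = (n-2)(n+1),
// so the least n >= 0 with c(n) == 0 is n = 2, which is what this returns
// (after verifying that the chrec really evaluates to zero there).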

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  // Upper bound is exclusive, so it coincides with the first exiting value.
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such a case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}
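
// Worked example (for illustration; values chosen here): for {0,+,1,+,1}
// the accumulated value is c(n) = n(n+1)/2, i.e. 0, 1, 3, 6, 10, ...
// Against the range [0,8) the first value outside the range is c(4) = 10
// while c(3) = 6 is still inside, so the computed crossing point is n = 4.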

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know, and take advantage of, the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }

    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap, we can use unsigned divide to
  // compute the backedge count. In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}
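
// Worked examples (for illustration; values chosen here): for {10,+,-1} the
// step is unitary and counting down, so Distance = Start = 10 and the exact
// backedge-taken count is 10. For {3,+,2} over i4, the general equation
// 2*N = -3 (mod 16) has no solution because the left-hand side is always
// even, so SolveLinEquationWithOverflow yields SCEVCouldNotCompute.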

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
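
// For illustration (hypothetical IR): two identical "sdiv i32 %x, %y"
// instructions are not expressible as SCEV arithmetic and become distinct
// SCEVUnknowns, yet they compute the same value, so HasSameValue accepts
// them; two identical allocas are deliberately excluded, since they yield
// distinct addresses despite comparing "identical".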

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivially_true or trivially_false.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;

trivially_true:
  // Return 0 == 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_EQ;
  return true;

trivially_false:
  // Return 0 != 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_NE;
  return true;
}
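
// For illustration (hypothetical input): given "%x s<= 5" the boundary-case
// canonicalization rewrites the predicate to "%x s< 6"; given "5 s< %x" the
// operands are first swapped to put the constant on the right, yielding
// "%x s> 5". Each rewrite sets Changed, so the function recurses (up to
// depth 3) until the comparison reaches a fixed point.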

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}
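
// For illustration (hypothetical values): for S = {%a,+,1}<L> the init part
// is %a (the value on entry to L) and the post-increment part is
// {%a + 1,+,1}<L> (the value after the first backedge); a loop-invariant S
// simply splits into { S, S }.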

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
         isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}
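
// For illustration (hypothetical values): with LHS = {0,+,1}<nuw> and
// Pred = ICMP_ULT against a fixed RHS, the predicate can only change from
// true to false as the induction variable grows, so Increasing is set to
// false; with ICMP_UGT it can only change from false to true, so Increasing
// is set to true.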

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both of the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;

  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
        return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
            .contains(RangeLHS);
      };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}
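
// For illustration (hypothetical ranges): if getUnsignedRange(LHS) = [0,10)
// and getUnsignedRange(RHS) = [10,20), then
// makeSatisfyingICmpRegion(ICMP_ULT, [10,20)) = [0,10), which contains
// [0,10), so "LHS u< RHS" is known to hold.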

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}
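
// For illustration (hypothetical input): to establish X s< (X + 1)<nsw>,
// MatchBinaryAddToConst splits the RHS into X plus the constant 1 carrying
// the <nsw> flag; since 1 > 0 and the addition cannot wrap in the signed
// sense, the strict inequality is known to hold.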

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on
  // the stack can result in exponential time complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch. The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](Value *Condition, bool Inverse) {
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we've found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both the
  // operands are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
        case ICmpInst::ICMP_SGE:
        case ICmpInst::ICMP_UGE:
          // We know V `Pred` SharperMin.  If this implies LHS `Pred`
          // RHS, we're done.
          if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                    getConstant(SharperMin)))
            return true;
          LLVM_FALLTHROUGH;

        case ICmpInst::ICMP_SGT:
        case ICmpInst::ICMP_UGT:
          // We know from the range information that (V `Pred` Min ||
          // V == Min).  We know from the guarding condition that !(V
          // == Min).  This gives us
          //
          //       V `Pred` Min || V == Min && !(V == Min)
          //  =>   V `Pred` Min
          //
          // If V `Pred` Min implies LHS `Pred` RHS, we're done.

          if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
            return true;
          LLVM_FALLTHROUGH;

        default:
          // No change
          break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}

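// Worked example (illustrative): for More = (7 + %x) and Less = (3 + %x),
// splitBinaryAdd yields C2 = 7 and C1 = 3 with matching non-constant
// remainders, so the result is 7 - 3 = 4. For More = {5,+,%s}<%loop> and
// Less = {2,+,%s}<%loop>, the AddRec case above reduces the query to the
// start values and returns 5 - 2 = 3.
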
bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //       FoundLHS s< FoundRHS s< INT_MIN - C
  // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                        (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
  // <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

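// Numeric sketch of rule (1) (illustrative, i8 values): take FoundLHS = 10,
// FoundRHS = 20 and C = 30. Since 20 u< -30 (that is, 20 u< 226), adding C
// to both sides cannot wrap unsigned, so 40 u< 50 follows. The code above
// proves the precondition "FoundRHS u< -C" via isLoopEntryGuardedByCond with
// FoundRHSLimit = -C.
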
bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }

  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return conservative
      // answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both AddRec and Unknown
    // PHIs, for it we can compare incoming values of AddRec from above the
    // loop and latch with their respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to
    // RHS, the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// If Expr computes ~A, return A; otherwise return nullptr.
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

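// SCEV has no dedicated "not" node: ~A is canonicalized as (-1 + (-1 * A)),
// which is exactly the add-of-all-ones plus mul-by-all-ones shape matched
// above. For instance (illustrative), Expr = (-1 + (-1 * %x)) returns %x.
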
/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
                              const SCEV *Candidate) {
  const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  if (!MaxExpr) return false;

  return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}

/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
                              const SCEV *MaybeMinExpr,
                              const SCEV *Candidate) {
  const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  if (!MaybeMaxExpr)
    return false;

  return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

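// Illustrative example: for LHS = {1,+,2}<nsw><%loop> and RHS =
// {5,+,2}<nsw><%loop>, both sides are affine over the same loop with equal
// steps and the required no-wrap flag, so "LHS s< RHS" reduces to the start
// comparison 1 s< 5, which constant-folds to true.
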
/// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In general case, creating a SCEV for it may
      // lead to a complex analysis of the entire graph, and in particular it
      // can request trip count recalculation for the same loop. This would
      // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of types is a pointer and another one is not. We cannot extend
        // them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2. It means that FoundLHS is at
      // least 3. If we divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3. Then FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Anyways, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for all
  // possible incoming values of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

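// Numeric sketch (illustrative, unsigned i8): if RHS has unsigned max 250 and
// Stride has unsigned max 10, then MaxStrideMinusOne = 9 and MaxValue = 255,
// so the check is 255 - 9 = 246 u< 250 -- the IV may step past RHS and wrap,
// and the function reports potential overflow.
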
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

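// Worked arithmetic (illustrative): with Equality == false this computes
// (Delta + Step - 1) /u Step, i.e. ceil(Delta /u Step); e.g. Delta = 10 and
// Step = 3 gives (10 + 2) /u 3 = 4. With Equality == true it computes
// (Delta + Step) /u Step, which is (Delta /u Step) + 1 when no wrap occurs.
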
const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least one.
  // In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
  // is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

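// Numeric sketch (illustrative, unsigned i8): MinStart = 0 and
// StrideForMaxBECount = 4 give Limit = 255 - 3 = 252. If End has unsigned
// max 200, then MaxEnd = umin(200, 252) = 200 and the result is
// computeBECount(200 - 0, 4, false) = (200 + 3) /u 4 = 50.
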
ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula is as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride) returning
    // true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement operation
    // itself is wrapping. The computed backedge taken count may be wrong in
    // such cases. This is prevented by checking that the stride is not known to
    // be either positive or non-positive. For example, no wrap flags are
    // propagated to the post-increment IV of this loop with a trip count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing to optimize in presence of
    // undefined behaviors like the case of C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop and
  // cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the end
  // bound of the loop (RHS), and the fact that IV does not overflow (which is
  // checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is evaluated
  // and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a backedge
  // count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken count
  // will not generate any unsigned overflow. Relaxed no-overflow conditions
  // exploit NoWrapFlags, allowing to optimize in presence of undefined
  // behaviors like the case of C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = getCouldNotCompute();
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else
    MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

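// Worked example (illustrative): solving {0,+,3} in Range = [0, 10) gives
// A = 3, End = 9 and ExitVal = (9 + 3) /u 3 = 4. Evaluating the chrec at 4
// yields 12, which is outside [0, 10), while the value at 3 is 9 and still
// inside -- so 4 is returned as the iteration count that first leaves the
// range.
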
const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

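// Illustrative example: for {A,+,B,+,C}, the step is {B,+,C}, and the
// post-increment expression built above is {A+B,+,B+C,+,C} -- each operand
// is summed with its successor and the last operand is carried over.
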
// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 +  %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;
  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}
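// For a "store double %v, double* %p", for example, this returns the SCEV for
// sizeof(double), i.e. a constant 8 of the effective (pointer-width) type.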
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}
void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {
      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}
/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization,
/// which is the offset of the start of the array. The SCEV->delinearize
/// algorithm computes the multiples of SCEV coefficients: that is a pattern
/// matching of sub expressions in the stride and base of a SCEV corresponding
/// to the computation of a GCD (greatest common divisor) of base and stride.
/// When SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}
void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}
//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}
ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}
ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 2);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }

  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}
static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}
void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}
ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
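// Note the second lookup of LoopDispositions above: computeLoopDisposition can
// recurse and insert into the map, which may invalidate the reference obtained
// before the call, so the entry is re-found before the cached value is set.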
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}
void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}
void ScalarEvolution::getUsedLoops(const SCEV *S,
                                   SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}
void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}
bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}
AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
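// The two predicate factories below follow the usual FoldingSet pattern: the
// arguments are hashed into a FoldingSetNodeID, an existing node is returned
// if one matches, and otherwise a new node is allocated from SCEVAllocator
// and interned, so pointer equality on predicates mirrors structural equality.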
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}
const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {
class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace
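// The two entry points below drive SCEVPredicateRewriter: rewriteUsingPredicate
// applies an already-collected union predicate to simplify S, while
// convertSCEVToAddRecWithPredicates gathers whatever new predicates are needed
// to express S as an add recurrence.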
const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}
bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}
SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}
bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}
void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
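// As a concrete case: for {%x,+,1}<nuw><nsw> the implied flags are
// IncrementNSSW (from NSW) plus IncrementNUSW (NUW with a non-negative step),
// so a wrap predicate asking only for those flags is statically satisfied.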
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}
ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}
bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}
void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}
const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}
void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}
bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}
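// Typical client pattern (a sketch; Ptr and TheLoop are placeholders for a
// pointer value and its loop in a pass such as the loop vectorizer):
//
//   PredicatedScalarEvolution PSE(SE, *TheLoop);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is only valid under PSE.getUnionPredicate(); a transform relying
//     // on it must emit the corresponding runtime checks.
//   }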
PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B
// is 4, then A / B becomes X / 8).
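// For example, with A = %a and B = 4, "%a urem 4" reaches this matcher as
// (%a + (-4 * (%a /u 4))): trying the negated operands of the multiply
// recovers the divisor and yields LHS = %a, RHS = 4.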
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));